diff --git a/.gitignore b/.gitignore
index bf827417..d669f417 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,6 @@ glide.lock
 .DS_Store
 .terraform/
 gpg/
+
+# ignore. For testing we don't want to store a specific provider.
+.terraform.lock.hcl
\ No newline at end of file
diff --git a/README.md b/README.md
index 8a4af7bf..38ec7948 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ This provider is for creating and modifying IAM roles via the ALKS API.

 ## Pre-Requisites

-* An ALKS Admin or IAMAdmin STS [assume-role](http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) session is needed. PowerUser access is not sufficient to create IAM roles.
+* An ALKS Admin or IAMAdmin STS [assume-role](http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) session is needed. PowerUser access is not sufficient to create IAM roles.
 * This tool is best suited for users with an `Admin` role
 * With an `IAMAdmin|LabAdmin` role, you can create roles and attach policies, but you can't create other infrastructure.
 * Works with [Terraform](https://www.terraform.io/) version `0.10.0` or newer.
diff --git a/data_source_alks_keys.go b/data_source_alks_keys.go
index f09c6766..34c047db 100644
--- a/data_source_alks_keys.go
+++ b/data_source_alks_keys.go
@@ -2,7 +2,7 @@ package main

 import (
 	"github.com/Cox-Automotive/alks-go"
-	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"log"
 	"strings"
 )
diff --git a/docs/guides/local_installation.md b/docs/guides/local_installation.md
index d4eef606..26c6f781 100644
--- a/docs/guides/local_installation.md
+++ b/docs/guides/local_installation.md
@@ -47,11 +47,11 @@ mkdir -p ~/.terraform.d/plugins &&
 **One-liner download for macOS / Linux:**

 ```sh
-mkdir -p ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/1.5.14/darwin_amd64 &&
- curl -Ls https://api.github.com/repos/Cox-Automotive/terraform-provider-alks/releases | jq -r --arg release "v1.5.14" --arg arch "$(uname -s | tr A-Z a-z)" '.[] | select(.tag_name | contains($release)) | .assets[]| select(.browser_download_url | contains($arch)) | select(.browser_download_url | contains("amd64")) | .browser_download_url' |
- xargs -n 1 curl -Lo ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/1.5.14/darwin_amd64/terraform-provider-alks.zip &&
- pushd ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/1.5.14/darwin_amd64 &&
- unzip ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/1.5.14/darwin_amd64/terraform-provider-alks.zip -d terraform-provider-alks-tmp &&
+mkdir -p ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/2.0.0/darwin_amd64 &&
+ curl -Ls https://api.github.com/repos/Cox-Automotive/terraform-provider-alks/releases | jq -r --arg release "v2.0.0" --arg arch "$(uname -s | tr A-Z a-z)" '.[] | select(.tag_name | contains($release)) | .assets[]| select(.browser_download_url | contains($arch)) | select(.browser_download_url | contains("amd64")) | .browser_download_url' |
+ xargs -n 1 curl -Lo ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/2.0.0/darwin_amd64/terraform-provider-alks.zip &&
+ pushd ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/2.0.0/darwin_amd64 &&
+ unzip ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/2.0.0/darwin_amd64/terraform-provider-alks.zip -d terraform-provider-alks-tmp &&
 mv terraform-provider-alks-tmp/terraform-provider-alks* .
&& chmod +x terraform-provider-alks* && rm -rf terraform-provider-alks-tmp && @@ -93,6 +93,6 @@ mkdir -p ~/.terraform.d/plugins/Cox-Automotive/engineering-enablement/alks/1.5.1 ### Supported Versions -| Terraform 0.10.x | Terraform 0.12.x | Terraform 0.13.x | Terraform 0.14.x | -| ---------------- | ---------------- | ---------------- | ---------------- | -| ALKS TFP 0.9.0 < 1.3.0 | ALKS TFP 1.3.0+ | ALKS TFP 1.3.0+ | ALKS TFP 1.3.0+ | +| Terraform 0.10.x | Terraform 0.11.x | Terraform 0.12.x | Terraform 0.13.x | Terraform 0.14.x | +| ---------------- | ---------------- | ---------------- | ---------------- | ---------------- | +| ALKS TFP 0.9.0 < 1.3.0 | ALKS TFP 1.3.0 < 1.5.15 | ALKS TFP 1.3.0+ | ALKS TFP 1.3.0+ | ALKS TFP 1.3.0+ | diff --git a/examples/alks.tf b/examples/alks.tf index 6ac2f0a2..36c7bd3f 100644 --- a/examples/alks.tf +++ b/examples/alks.tf @@ -34,7 +34,7 @@ provider "aws" { # CREATE IAM ROLE -- Initial Provider resource "alks_iamrole" "test_role" { - name = "TEST-DELETE" + name = "TEST-DELETE-PRIMARY-PROVIDER" type = "AWS CodeBuild" include_default_policies = false enable_alks_access = true @@ -55,7 +55,7 @@ resource "alks_iamrole" "test_dynamic_role" { # CREATE IAM ROLE -- Secondary Provider resource "alks_iamrole" "test_role_nonprod" { provider = alks.nonprod - name = "TEST-DELETE" + name = "TEST-DELETE-SECONDARY-PROVIDER" type = "AWS CodeBuild" include_default_policies = false enable_alks_access = true @@ -89,5 +89,5 @@ resource "aws_iam_role_policy_attachment" "sr-attach" { # CREATE LTK USER resource "alks_ltk" "ltk" { - iam_username = "TEST_LTK_USER" + iam_username = "TEST-LTK-USER" } diff --git a/examples/versions.tf b/examples/versions.tf index 6273078f..856b2d1a 100644 --- a/examples/versions.tf +++ b/examples/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { alks = { source = "Cox-Automotive/alks" - version = "1.5.8" + version = "2.0.0" } aws = { source = "hashicorp/aws" diff --git a/go.mod b/go.mod index 80b4b62b..262e0458 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,6 @@ require ( github.com/Cox-Automotive/alks-go v0.0.0-20210414185953-754a7e5f7114 github.com/aws/aws-sdk-go v1.31.15 github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/terraform v0.12.26 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 github.com/mitchellh/go-homedir v1.1.0 ) diff --git a/go.sum b/go.sum index 4410dcf4..a137cc8b 100644 --- a/go.sum +++ b/go.sum @@ -3,372 +3,351 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= 
+cloud.google.com/go v0.61.0 h1:NLQf5e1OMspfNT1RAHOB3ublr1TW3YTXO8OiWwVjK2U= +cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/azure/cli v0.2.0/go.mod h1:WWTbGPvkAg3I4ms2j2s+Zr5xCGwGqTQh+6M2ZqOczkE= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= 
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= -github.com/Cox-Automotive/alks-go v0.0.0-20210331173447-21b08aca8d67 h1:vcruzED50hPyycvR6mtPXKy2zQoH+AOicCOuwiWr7Cg= -github.com/Cox-Automotive/alks-go v0.0.0-20210331173447-21b08aca8d67/go.mod h1:jJNgXthl59Vt2tJHSC3WZ0vlopV9xqdclfQuLgwHjOw= github.com/Cox-Automotive/alks-go v0.0.0-20210414185953-754a7e5f7114 h1:U6UO6xb3cFJTMLCJWMVxkhWq2vCrX5c7QnPlsQCPGZE= github.com/Cox-Automotive/alks-go v0.0.0-20210414185953-754a7e5f7114/go.mod h1:jJNgXthl59Vt2tJHSC3WZ0vlopV9xqdclfQuLgwHjOw= -github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= -github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= -github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw= -github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= -github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M= -github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= +github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump 
v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-versions v0.0.2-0.20180815153302-64b99f7cb171/go.mod h1:JXY95WvQrPJQtudvNARshgWajS7jNNlM90altXIPNyI= -github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.31.15 h1:1Ahi6nvJLg5cjO5i3U7BWh91/zOw//tqOTLpLnIeyss= github.com/aws/aws-sdk-go v1.31.15/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmatcuk/doublestar v1.1.5 h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk= -github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ= -github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc= +github.com/go-git/go-git-fixtures/v4 
v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk= +github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ= -github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-azure-helpers v0.10.0/go.mod h1:YuAtHxm2v74s+IjQwUG88dHBJPd5jL+cXr5BGVzSKhE= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 h1:l1KB3bHVdvegcIf5upQ5mjcHjs2qsWnKh4Yr9xgIuu8= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= +github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f h1:Yv9YzBlAETjy6AOX9eLBZ3nshNVRREgerT/3nvxlGho= -github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw= -github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= +github.com/hashicorp/go-hclog v0.15.0/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4= -github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-slug v0.4.1/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= -github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-tfe v0.8.1/go.mod h1:XAV72S4O1iP8BDaqiaPLmL2B4EE6almocnOn8E8stHc= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI= -github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= -github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE= -github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= -github.com/hashicorp/terraform v0.12.26 h1:FVsTCH1DMvTlzVSO2sKCzkwLczf/eZBO4GuY5IbHFk4= -github.com/hashicorp/terraform v0.12.26/go.mod h1:CBxNAiTW0pLap44/3GU4j7cYE2bMhkKZNlHPcr4P55U= -github.com/hashicorp/terraform-config-inspect 
v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-exec v0.13.0 h1:1Pth+pdWJAufJuWWjaVOVNEkoRTOjGn3hQpAqj4aPdg= +github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= +github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= +github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= +github.com/hashicorp/terraform-plugin-go v0.2.1 h1:EW/R8bB2Zbkjmugzsy1d27yS8/0454b3MtYHkzOknqA= +github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 h1:DGnxpIYRHXQZb2TOlQ1OCEYxoRQrAcbLIcYm8kvbFuU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3/go.mod h1:5wrrTcxbSaQXamCDbHZTHk6yTF9OEZaOvQ9fvLXBE3o= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/likexian/gokit v0.0.0-20190309162924-0a377eecf7aa/go.mod h1:QdfYv6y6qPA9pbBA2qXtoT8BMKha6UyNbxWGWl/9Jfk= -github.com/likexian/gokit v0.0.0-20190418170008-ace88ad0983b/go.mod h1:KKqSnk/VVSW8kEyO2vVCXoanzEutKdlBAPohmGXkxCk= -github.com/likexian/gokit v0.0.0-20190501133040-e77ea8b19cdc/go.mod h1:3kvONayqCaj+UgrRZGpgfXzHdMYCAO0KAt4/8n0L57Y= -github.com/likexian/gokit v0.20.15/go.mod h1:kn+nTv3tqh6yhor9BC4Lfiu58SmH8NmQ2PmEl+uM6nU= -github.com/likexian/simplejson-go v0.0.0-20190409170913-40473a74d76d/go.mod h1:Typ1BfnATYtZ/+/shXfFYLrovhFyuKvzwrdOnIDHlmg= -github.com/likexian/simplejson-go v0.0.0-20190419151922-c1f9f0b4f084/go.mod h1:U4O1vIJvIKwbMZKUJ62lppfdvkCdVd2nfMimHK81eec= -github.com/likexian/simplejson-go v0.0.0-20190502021454-d8787b4bfa0b/go.mod h1:3BWwtmKP9cXWwYCr5bkoVDEfLywacOv0s06OBEDpyt8= -github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84= -github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= -github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1 
h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.4 h1:ZU1VNC02qyufSZsjjs7+khruk2fKvbQ3TwRV/IBCeFA= +github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= -github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= 
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/panicwrap v1.0.0/go.mod h1:pKvZHwWrZowLUzftuFq7coarnxbBXU4aQh3N0BJOeeA= -github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/motain/gocheck v0.0.0-20131023154940-9beb271d26e6 h1:gKdQPVb3yDSbcw4sgNyrt2LP0/4uTdrvTm3e4IcATCE= github.com/motain/gocheck v0.0.0-20131023154940-9beb271d26e6/go.mod h1:RnPn6D1AAyccwR5T+py4G3eMhZuqr0/pGM6Ygpu1tDc= -github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= +github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= 
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M= -github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= -github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= -github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c/go.mod h1:wk2XFUg6egk4tSDNZtXeKfe2G6690UVyt163PuUxBZk= -github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew= -github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod 
h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= -github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto 
v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b 
h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -376,23 +355,38 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -403,14 +397,33 @@ golang.org/x/sys 
v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= -golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -421,17 +434,62 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed h1:+qzWo37K31KxduIYaBeMqJ8MUOyTayOQKpH9aDPLMSY= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -439,30 +497,76 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= +google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/main.go b/main.go index 5ecf31ec..972c279f 100644 --- a/main.go +++ b/main.go @@ -1,13 +1,13 @@ package main import ( - "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" ) func main() { plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() terraform.ResourceProvider { + ProviderFunc: func() *schema.Provider { return Provider() }, }) diff --git a/provider.go b/provider.go index 9869ea43..1dd2d0f3 100644 --- a/provider.go +++ b/provider.go @@ -1,24 +1,25 @@ package main import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "log" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - homedir "github.com/mitchellh/go-homedir" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mitchellh/go-homedir" ) // Provider returns a terraform.ResourceProvider. -func Provider() terraform.ResourceProvider { +func Provider() *schema.Provider { return &schema.Provider{ Schema: map[string]*schema.Schema{ - "url": &schema.Schema{ + "url": { Type: schema.TypeString, Required: true, Description: "This is the base URL to ALKS service. It must be provided, but it can also be sourced from the ALKS_URL environment variable.", DefaultFunc: schema.EnvDefaultFunc("ALKS_URL", nil), }, - "access_key": &schema.Schema{ + "access_key": { Type: schema.TypeString, Optional: true, Description: "This is the AWS access key. It must be provided, but it can also be sourced from the ALKS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID environment variable.", @@ -27,7 +28,7 @@ func Provider() terraform.ResourceProvider { "AWS_ACCESS_KEY_ID", }, nil), }, - "secret_key": &schema.Schema{ + "secret_key": { Type: schema.TypeString, Optional: true, Description: "This is the AWS secret key. It must be provided, but it can also be sourced from the ALKS_SECRET_ACCESS_KEY or AWS_SECRET_ACCESS_KEY environment variable", @@ -36,7 +37,7 @@ func Provider() terraform.ResourceProvider { "AWS_SECRET_ACCESS_KEY", }, nil), }, - "token": &schema.Schema{ + "token": { Type: schema.TypeString, Optional: true, Description: "This is the AWS session token. 
It must be provided, but it can also be sourced from the ALKS_SESSION_TOKEN or AWS_SESSION_TOKEN environment variable", @@ -84,7 +85,7 @@ func Provider() terraform.ResourceProvider { "alks_keys": dataSourceAlksKeys(), }, - ConfigureFunc: providerConfigure, + ConfigureContextFunc: providerConfigure, } } @@ -120,7 +121,10 @@ func assumeRoleSchema() *schema.Schema { } } -func providerConfigure(d *schema.ResourceData) (interface{}, error) { +func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { + + var diags diag.Diagnostics + config := Config{ URL: d.Get("url").(string), AccessKey: d.Get("access_key").(string), @@ -143,10 +147,15 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { // Set CredsFilename, expanding home directory credsPath, err := homedir.Expand(d.Get("shared_credentials_file").(string)) if err != nil { - return nil, err + return nil, diag.FromErr(err) } config.CredsFilename = credsPath + c, err := config.Client() + if err != nil { + return nil, diag.FromErr(err) + } + log.Println("[INFO] Initializing ALKS client") - return config.Client() + return c, diags } diff --git a/provider_test.go b/provider_test.go index 55e40869..e6bbac50 100644 --- a/provider_test.go +++ b/provider_test.go @@ -4,28 +4,27 @@ import ( "os" "testing" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -var testAccProviders map[string]terraform.ResourceProvider +var testAccProviders map[string]*schema.Provider var testAccProvider *schema.Provider func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ + testAccProvider = Provider() + testAccProviders = map[string]*schema.Provider{ "alks": testAccProvider, } } func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + if err := Provider().InternalValidate(); err != nil { t.Fatalf("Error: %s", err) } } func TestProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = Provider() + var _ = Provider() } func testAccPreCheck(t *testing.T) { diff --git a/resource_alks_iamrole.go b/resource_alks_iamrole.go index 8c971fa9..1c7b330e 100644 --- a/resource_alks_iamrole.go +++ b/resource_alks_iamrole.go @@ -2,14 +2,13 @@ package main import ( "fmt" + "github.com/Cox-Automotive/alks-go" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "log" "strings" "time" - - alks "github.com/Cox-Automotive/alks-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" ) func resourceAlksIamRole() *schema.Resource { @@ -20,46 +19,46 @@ func resourceAlksIamRole() *schema.Resource { Exists: resourceAlksIamRoleExists, Delete: resourceAlksIamRoleDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, SchemaVersion: 1, MigrateState: migrateState, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "include_default_policies": &schema.Schema{ + "include_default_policies": { Type: schema.TypeBool, 
Required: true, ForceNew: true, }, - "role_added_to_ip": &schema.Schema{ + "role_added_to_ip": { Type: schema.TypeBool, Computed: true, }, - "arn": &schema.Schema{ + "arn": { Type: schema.TypeString, Computed: true, }, - "ip_arn": &schema.Schema{ + "ip_arn": { Type: schema.TypeString, Computed: true, }, - "enable_alks_access": &schema.Schema{ + "enable_alks_access": { Type: schema.TypeBool, Default: false, Optional: true, }, - "template_fields": &schema.Schema{ + "template_fields": { Type: schema.TypeMap, Elem: schema.TypeString, ForceNew: true, @@ -84,34 +83,34 @@ func resourceAlksIamTrustRole() *schema.Resource { MigrateState: migrateState, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "trust_arn": &schema.Schema{ + "trust_arn": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "role_added_to_ip": &schema.Schema{ + "role_added_to_ip": { Type: schema.TypeBool, Computed: true, }, - "arn": &schema.Schema{ + "arn": { Type: schema.TypeString, Computed: true, }, - "ip_arn": &schema.Schema{ + "ip_arn": { Type: schema.TypeString, Computed: true, }, - "enable_alks_access": &schema.Schema{ + "enable_alks_access": { Type: schema.TypeBool, Default: false, Optional: true, @@ -179,7 +178,7 @@ func resourceAlksIamTrustRoleCreate(d *schema.ResourceData, meta interface{}) er time.Sleep(15 * time.Second) return resource.RetryableError(err) } - return resource.NonRetryableError(err) + return nil }) if err != nil { @@ -271,8 +270,6 @@ func resourceAlksIamRoleUpdate(d *schema.ResourceData, meta interface{}) error { if err := updateAlksAccess(d, meta); err != nil { return err } - - d.SetPartial("enable_alks_access") } d.Partial(false) diff --git a/resource_alks_iamrole_test.go b/resource_alks_iamrole_test.go index 9c6353fa..e4661cb3 100644 --- a/resource_alks_iamrole_test.go +++ b/resource_alks_iamrole_test.go @@ -5,9 +5,9 @@ import ( "log" "testing" - alks "github.com/Cox-Automotive/alks-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/Cox-Automotive/alks-go" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAlksIamRole_Basic(t *testing.T) { @@ -18,7 +18,7 @@ func TestAccAlksIamRole_Basic(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckAlksIamRoleDestroy(&resp), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccCheckAlksIamRoleConfigBasic, Check: resource.ComposeTestCheckFunc( // testAccCheckAlksIamRoleExists("bar420", &resp), @@ -31,7 +31,7 @@ func TestAccAlksIamRole_Basic(t *testing.T) { "alks_iamrole.foo", "include_default_policies", "false"), ), }, - resource.TestStep{ + { // update the resource Config: testAccCheckAlksIamRoleConfigUpdateBasic, Check: resource.ComposeTestCheckFunc( @@ -57,7 +57,7 @@ func TestAccAlksIamTrustRole_Basic(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckAlksIamRoleDestroy(&resp), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccCheckAlksIamTrustRoleConfigBasic, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( @@ -66,7 +66,7 @@ func TestAccAlksIamTrustRole_Basic(t *testing.T) { "alks_iamtrustrole.bar", "type", "Inner Account"), ), }, - resource.TestStep{ + { // update the resource Config: 
testAccCheckAlksIamTrustRoleConfigUpdateBasic, Check: resource.ComposeTestCheckFunc( diff --git a/resource_alks_ltk.go b/resource_alks_ltk.go index 25d8699d..43c7dd13 100644 --- a/resource_alks_ltk.go +++ b/resource_alks_ltk.go @@ -3,8 +3,8 @@ package main import ( "log" - alks "github.com/Cox-Automotive/alks-go" - "github.com/hashicorp/terraform/helper/schema" + "github.com/Cox-Automotive/alks-go" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceAlksLtk() *schema.Resource { @@ -14,28 +14,27 @@ func resourceAlksLtk() *schema.Resource { Delete: resourceAlksLtkDelete, Exists: resourceAlksLtkExists, Importer: &schema.ResourceImporter{ - // Terraform provided importer - State: schema.ImportStatePassthrough, + StateContext: schema.ImportStatePassthroughContext, }, SchemaVersion: 1, Schema: map[string]*schema.Schema{ - "iam_username": &schema.Schema{ + "iam_username": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "iam_user_arn": &schema.Schema{ + "iam_user_arn": { Type: schema.TypeString, Computed: true, }, - "access_key": &schema.Schema{ + "access_key": { Sensitive: true, Type: schema.TypeString, Computed: true, }, - "secret_key": &schema.Schema{ + "secret_key": { Sensitive: true, Type: schema.TypeString, Computed: true, diff --git a/resource_alks_ltk_test.go b/resource_alks_ltk_test.go index 1d789a7d..e5ea18d6 100644 --- a/resource_alks_ltk_test.go +++ b/resource_alks_ltk_test.go @@ -2,9 +2,9 @@ package main import ( "fmt" - alks "github.com/Cox-Automotive/alks-go" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/Cox-Automotive/alks-go" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "testing" ) @@ -17,12 +17,12 @@ func TestAlksLTKCreate(t *testing.T) { CheckDestroy: testAlksLtkDestroy(&resp), Steps: []resource.TestStep{ // Create the resource - resource.TestStep{ + { Config: testAlksLTKCreateConfig, Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("alks_ltk.foo", "iam_username", "TEST_LTK_USER")), }, // Update the resource - resource.TestStep{ + { Config: testAlksLTKUpdateConfig, Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("alks_ltk.foo", "iam_username", "TEST_LTK_USER_2")), }, diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore new file mode 100644 index 00000000..ee9694b8 --- /dev/null +++ b/vendor/cloud.google.com/go/.gitignore @@ -0,0 +1,11 @@ +# Editors +.idea +.vscode +*.swp + +# Test files +*.test +coverage.txt + +# Other +.DS_Store diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md new file mode 100644 index 00000000..3e9fca4a --- /dev/null +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -0,0 +1,1566 @@ +# Changes + +## v0.61.0 + +### Changes + +- all: + - Update all direct dependencies. +- dashboard: + - Start generating client for apiv1. +- policytroubleshooter: + - Start generating client for apiv1. +- profiler: + - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`. +- Various updates to autogenerated clients. + +## v0.60.0 + +### Changes + +- all: + - Refactored examples to reduce module dependencies. + - Update sub-modules to use cloud.google.com/go v0.59.0. +- internal: + - Start generating client for gaming apiv1beta. +- Various updates to autogenerated clients. 
+ +## v0.59.0 + +### Announcements + +goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our +contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept +pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to +point to GitHub. + +### Changes + +- all: + - Remove dependency on honnef.co/go/tools. + - Update our contributing instructions now that we use GitHub for reviews. + - Remove some un-inclusive terminology. +- compute/metadata: + - Pass cancelable context to DNS lookup. +- .github: + - Update templates issue/PR templates. +- internal: + - Bump several clients to GA. + - Fix GoDoc badge source. + - Several automation changes related to the move to GitHub. + - Start generating a client for asset v1p5beta1. +- Various updates to autogenerated clients. + +## v0.58.0 + +### Deprecation notice + +- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking + changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`. + +### Changes + +- all: + - The remaining uses of gtransport.Dial have been removed. + - The `genproto` dependency has been updated to a version that makes use of + new `protoreflect` library. For more information on these protobuf changes + please see the following post from the official Go blog: + https://blog.golang.org/protobuf-apiv2. +- internal: + - Started generation of datastore admin v1 client. + - Updated protofuf version used for generation to 3.12.X. + - Update the release levels for several APIs. + - Generate clients with protoc-gen-go@v1.4.1. +- monitoring: + - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation + notice above). +- profiler: + - Fixed flakiness in tests. +- Various updates to autogenerated clients. + +## v0.57.0 + +- all: + - Update module dependency `google.golang.org/api` to `v0.21.0`. +- errorreporting: + - Add exported SetGoogleClientInfo wrappers to manual file. +- expr/v1alpha1: + - Deprecate client. This client will be removed in a future release. +- internal: + - Fix possible data race in TestTracer. + - Pin versions of tools used for generation. + - Correct the release levels for BigQuery APIs. + - Start generation osconfig v1. +- longrunning: + - Add exported SetGoogleClientInfo wrappers to manual file. +- monitoring: + - Stop generation of monitoring/apiv3 because of incoming breaking change. +- trace: + - Add exported SetGoogleClientInfo wrappers to manual file. +- Various updates to autogenerated clients. + +## v0.56.0 + +- secretmanager: + - add IAM helper +- profiler: + - try all us-west1 zones for integration tests +- internal: + - add config to generate webrisk v1 + - add repo and commit to buildcop invocation + - add recaptchaenterprise v1 generation config + - update microgenerator to v0.12.5 + - add datacatalog client + - start generating security center settings v1beta + - start generating osconfig agentendpoint v1 + - setup generation for bigquery/connection/v1beta1 +- all: + - increase continous testing timeout to 45m + - various updates to autogenerated clients. + +## v0.55.0 + +- Various updates to autogenerated clients. 
+ +## v0.54.0 + +- all: + - remove unused golang.org/x/exp from mod file + - update godoc.org links to pkg.go.dev +- compute/metadata: + - use defaultClient when http.Client is nil + - remove subscribeClient +- iam: + - add support for v3 policy and IAM conditions +- Various updates to autogenerated clients. + +## v0.53.0 + +- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers). + - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API. +- profiler: remove symbolization (drops support for go1.10) +- Various updates to autogenerated clients. + +## v0.52.0 + +- internal/gapicgen: multiple improvements related to library generation. +- compute/metadata: unset ResponseHeaderTimeout in defaultClient +- docs: fix link to KMS in README.md +- Various updates to autogenerated clients. + +## v0.51.0 + +- secretmanager: + - add IAM helper for generic resource IAM handle +- cloudbuild: + - migrate to microgen in a major version +- Various updates to autogenerated clients. + +## v0.50.0 + +- profiler: + - Support disabling CPU profile collection. + - Log when a profile creation attempt begins. +- compute/metadata: + - Fix panic on malformed URLs. + - InstanceName returns actual instance name. +- Various updates to autogenerated clients. + +## v0.49.0 + +- functions/metadata: + - Handle string resources in JSON unmarshaller. +- Various updates to autogenerated clients. + +## v0.48.0 + +- Various updates to autogenerated clients + +## v0.47.0 + +This release drops support for Go 1.9 and Go 1.10: we continue to officially +support Go 1.11, Go 1.12, and Go 1.13. + +- Various updates to autogenerated clients. +- Add cloudbuild/apiv1 client. + +## v0.46.3 + +This is an empty release that was created solely to aid in storage's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.2 + +This is an empty release that was created solely to aid in spanner's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.1 + +This is an empty release that was created solely to aid in firestore's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.0 + +- spanner: + - Retry "Session not found" for read-only transactions. + - Retry aborted PDMLs. +- spanner/spannertest: + - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly. +- storage: + - Add HMACKeyOptions. + - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL, + DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice + StorageClasses but they are still acceptable values. +- trace: + - Remove cloud.google.com/go/trace. Package cloud.google.com/go/trace has been + marked OBSOLETE for several years: it is now no longer provided. If you + relied on this package, please vendor it or switch to using + https://cloud.google.com/trace/docs/setup/go (which obsoleted it). + +## v0.45.1 + +This is an empty release that was created solely to aid in pubsub's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.45.0 + +- compute/metadata: + - Add Email method. +- storage: + - Fix duplicated retry logic. + - Add ReaderObjectAttrs.StartOffset. 
+ - Support reading last N bytes of a file when a negative range is given, such + as `obj.NewRangeReader(ctx, -10, -1)`. + - Add HMACKey listing functionality. +- spanner/spannertest: + - Support primary keys with no columns. + - Fix MinInt64 parsing. + - Implement deletion of key ranges. + - Handle reads during a read-write transaction. + - Handle returning DATE values. +- pubsub: + - Fix Ack/Modack request size calculation. +- logging: + - Add auto-detection of monitored resources on GAE Standard. + +## v0.44.3 + +This is an empty release that was created solely to aid in bigtable's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.2 + +This is an empty release that was created solely to aid in bigquery's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.1 + +This is an empty release that was created solely to aid in datastore's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.0 + +- datastore: + - Interface elements whose underlying types are supported, are now supported. + - Reduce time to initial retry from 1s to 100ms. +- firestore: + - Add Increment transformation. +- storage: + - Allow emulator with STORAGE_EMULATOR_HOST. + - Add methods for HMAC key management. +- pubsub: + - Add PublishCount and PublishLatency measurements. + - Add DefaultPublishViews and DefaultSubscribeViews for convenience of + importing all views. + - Add add Subscription.PushConfig.AuthenticationMethod. +- spanner: + - Allow emulator usage with SPANNER_EMULATOR_HOST. + - Add cloud.google.com/go/spanner/spannertest, a spanner emulator. + - Add cloud.google.com/go/spanner/spansql which contains types and a parser + for the Cloud Spanner SQL dialect. +- asset: + - Add apiv1p2beta1 client. + +## v0.43.0 + +This is an empty release that was created solely to aid in logging's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.42.0 + +- bigtable: + - Add an admin method to update an instance and clusters. + - Fix bttest regex matching behavior for alternations (things like `|a`). + - Expose BlockAllFilter filter. +- bigquery: + - Add Routines API support. +- storage: + - Add read-only Bucket.LocationType. +- logging: + - Add TraceSampled to Entry. + - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context. +- pubsub: + - Add Cloud Key Management to TopicConfig. + - Change ExpirationPolicy to optional.Duration. +- automl: + - Add apiv1beta1 client. +- iam: + - Fix compilation problem with iam/credentials/apiv1. + +## v0.41.0 + +- bigtable: + - Check results from PredicateFilter in bttest, which fixes certain false matches. +- profiler: + - debugLog checks user defined logging options before logging. +- spanner: + - PartitionedUpdates respect query parameters. + - StartInstance allows specifying cloud API access scopes. +- bigquery: + - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics. +- firestore: + - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll. +- replay: + - Change references to IPv4 addresses to localhost, making replay compatible with IPv6. + +## v0.40.0 + +- all: + - Update to protobuf-golang v1.3.1. 
+- datastore: + - Attempt to decode GAE-encoded keys if initial decoding attempt fails. + - Support integer time conversion. +- pubsub: + - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow, + this value should be adjusted higher. + - Use IPv6 compatible target in testutil. +- bigtable: + - Fix Latin-1 regexp filters in bttest, allowing \C. + - Expose PassAllFilter. +- profiler: + - Add log messages for slow path in start. + - Fix start to allow retry until success. +- firestore: + - Add admin client. +- containeranalysis: + - Add apiv1 client. +- grafeas: + - Add apiv1 client. + +## 0.39.0 + +- bigtable: + - Implement DeleteInstance in bttest. + - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest. +- bigquery: + - Move RequirePartitionFilter outside of TimePartioning. + - Expose models API. +- firestore: + - Allow array values in create and update calls. + - Add CollectionGroup method. +- pubsub: + - Add ExpirationPolicy to Subscription. +- storage: + - Add V4 signing. +- rpcreplay: + - Match streams by first sent request. This further improves rpcreplay's + ability to distinguish streams. +- httpreplay: + - Set up Man-In-The-Middle config only once. This should improve proxy + creation when multiple proxies are used in a single process. + - Remove error on empty Content-Type, allowing requests with no Content-Type + header but a non-empty body. +- all: + - Fix an edge case bug in auto-generated library pagination by properly + propagating pagetoken. + +## 0.38.0 + +This update includes a substantial reduction in our transitive dependency list +by way of updating to opencensus@v0.21.0. + +- spanner: + - Error implements GRPCStatus, allowing status.Convert. +- bigtable: + - Fix a bug in bttest that prevents single column queries returning results + that match other filters. + - Remove verbose retry logging. +- logging: + - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and + rune replace manually. +- recaptchaenterprise: + - Add v1beta1 client. +- phishingprotection: + - Add v1beta1 client. + +## 0.37.4 + +This patch releases re-builds the go.sum. This was not possible in the +previous release. + +- firestore: + - Add sentinel value DetectProjectID for auto-detecting project ID. + - Add OpenCensus tracing for public methods. + - Marked stable. All future changes come with a backwards compatibility + guarantee. + - Removed firestore/apiv1beta1. All users relying on this low-level library + should migrate to firestore/apiv1. Note that most users should use the + high-level firestore package instead. +- pubsub: + - Allow large messages in synchronous pull case. + - Cap bundler byte limit. This should prevent OOM conditions when there are + a very large number of message publishes occurring. +- storage: + - Add ETag to BucketAttrs and ObjectAttrs. +- datastore: + - Removed some non-sensical OpenCensus traces. +- webrisk: + - Add v1 client. +- asset: + - Add v1 client. +- cloudtasks: + - Add v2 client. + +## 0.37.3 + +This patch release removes github.com/golang/lint from the transitive +dependency list, resolving `go get -u` problems. + +Note: this release intentionally has a broken go.sum. Please use v0.37.4. + +## 0.37.2 + +This patch release is mostly intended to bring in v0.3.0 of +google.golang.org/api, which fixes a GCF deployment issue. + +Note: we had to-date accidentally marked Redis as stable. In this release, we've +fixed it by downgrading its documentation to alpha, as it is in other languages +and docs. 
+ +- all: + - Document context in generated libraries. + +## 0.37.1 + +Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which +introduces a new oauth2 url. + +## 0.37.0 + +- spanner: + - Add BatchDML method. + - Reduced initial time between retries. +- bigquery: + - Produce better error messages for InferSchema. + - Add logical type control for avro loads. + - Add support for the GEOGRAPHY type. +- datastore: + - Add sentinel value DetectProjectID for auto-detecting project ID. + - Allow flatten tag on struct pointers. + - Fixed a bug that caused queries to panic with invalid queries. Instead they + will now return an error. +- profiler: + - Add ability to override GCE zone and instance. +- pubsub: + - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more + consistently retry specific error codes based on whether they're idempotent + or non-idempotent. +- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing + the Content-Length header to be dropped. +- iot: + - Add new apiv1 client. +- securitycenter: + - Add new apiv1 client. +- cloudscheduler: + - Add new apiv1 client. + +## 0.36.0 + +- spanner: + - Reduce minimum retry backoff from 1s to 100ms. This makes time between + retries much faster and should improve latency. +- storage: + - Add support for Bucket Policy Only. +- kms: + - Add ResourceIAM helper method. + - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM. +- firestore: + - Switch from v1beta1 API to v1 API. + - Allow emulator with FIRESTORE_EMULATOR_HOST. +- bigquery: + - Add NumLongTermBytes to Table. + - Add TotalBytesProcessedAccuracy to QueryStatistics. +- irm: + - Add new v1alpha2 client. +- talent: + - Add new v4beta1 client. +- rpcreplay: + - Fix connection to work with grpc >= 1.17. + - It is now required for an actual gRPC server to be running for Dial to + succeed. + +## 0.35.1 + +- spanner: + - Adds OpenCensus views back to public API. + +## v0.35.0 + +- all: + - Add go.mod and go.sum. + - Switch usage of gax-go to gax-go/v2. +- bigquery: + - Fix bug where time partitioning could not be removed from a table. + - Fix panic that occurred with empty query parameters. +- bttest: + - Fix bug where deleted rows were returned by ReadRows. +- bigtable/emulator: + - Configure max message size to 256 MiB. +- firestore: + - Allow non-transactional queries in transactions. + - Allow StartAt/EndBefore on direct children at any depth. + - QuerySnapshotIterator.Stop may be called in an error state. + - Fix bug the prevented reset of transaction write state in between retries. +- functions/metadata: + - Make Metadata.Resource a pointer. +- logging: + - Make SpanID available in logging.Entry. +- metadata: + - Wrap !200 error code in a typed err. +- profiler: + - Add function to check if function name is within a particular file in the + profile. + - Set parent field in create profile request. + - Return kubernetes client to start cluster, so client can be used to poll + cluster. + - Add function for checking if filename is in profile. +- pubsub: + - Fix bug where messages expired without an initial modack in + synchronous=true mode. + - Receive does not retry ResourceExhausted errors. +- spanner: + - client.Close now cancels existing requests and should be much faster for + large amounts of sessions. + - Correctly allow MinOpened sessions to be spun up. + +## v0.34.0 + +- functions/metadata: + - Switch to using JSON in context. + - Make Resource a value. +- vision: Fix ProductSearch return type. 
+- datastore: Add an example for how to handle MultiError. + +## v0.33.1 + +- compute: Removes an erroneously added go.mod. +- logging: Populate source location in fromLogEntry. + +## v0.33.0 + +- bttest: + - Add support for apply_label_transformer. +- expr: + - Add expr library. +- firestore: + - Support retrieval of missing documents. +- kms: + - Add IAM methods. +- pubsub: + - Clarify extension documentation. +- scheduler: + - Add v1beta1 client. +- vision: + - Add product search helper. + - Add new product search client. + +## v0.32.0 + +Note: This release is the last to support Go 1.6 and 1.8. + +- bigquery: + - Add support for removing an expiration. + - Ignore NeverExpire in Table.Create. + - Validate table expiration time. +- cbt: + - Add note about not supporting arbitrary bytes. +- datastore: + - Align key checks. +- firestore: + - Return an error when using Start/End without providing values. +- pubsub: + - Add pstest Close method. + - Clarify MaxExtension documentation. +- securitycenter: + - Add v1beta1 client. +- spanner: + - Allow nil in mutations. + - Improve doc of SessionPoolConfig.MaxOpened. + - Increase session deletion timeout from 5s to 15s. + +## v0.31.0 + +- bigtable: + - Group mutations across multiple requests. +- bigquery: + - Link to bigquery troubleshooting errors page in bigquery.Error comment. +- cbt: + - Fix go generate command. + - Document usage of both maxage + maxversions. +- datastore: + - Passing nil keys results in ErrInvalidKey. +- firestore: + - Clarify what Document.DataTo does with untouched struct fields. +- profile: + - Validate service name in agent. +- pubsub: + - Fix deadlock with pstest and ctx.Cancel. + - Fix a possible deadlock in pstest. +- trace: + - Update doc URL with new fragment. + +Special thanks to @fastest963 for going above and beyond helping us to debug +hard-to-reproduce Pub/Sub issues. + +## v0.30.0 + +- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information. +- bigtable: bttest supports row sample filter. +- functions: metadata package added for accessing Cloud Functions resource metadata. + +## v0.29.0 + +- bigtable: + - Add retry to all idempotent RPCs. + - cbt supports complex GC policies. + - Emulator supports arbitrary bytes in regex filters. +- firestore: Add ArrayUnion and ArrayRemove. +- logging: Add the ContextFunc option to supply the context used for + asynchronous RPCs. +- profiler: Ignore NotDefinedError when fetching the instance name +- pubsub: + - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled. + - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning. + - Fix deadlock. + - Restore Ack/Nack/Modacks metrics. + - Improve context handling in iterator. + - Implement synchronous mode for Receive. + - pstest: add Pull. +- spanner: Add a metric for the number of sessions currently opened. +- storage: + - Canceling the context releases all resources. + - Add additional RetentionPolicy attributes. +- vision/apiv1: Add LocalizeObjects method. + +## v0.28.0 + +- bigtable: + - Emulator returns Unimplemented for snapshot RPCs. +- bigquery: + - Support zero-length repeated, nested fields. +- cloud assets: + - Add v1beta client. +- datastore: + - Don't nil out transaction ID on retry. 
+- firestore: + - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next + returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator + (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots. + - Add array-contains operator. +- IAM: + - Add iam/credentials/apiv1 client. +- pubsub: + - Canceling the context passed to Subscription.Receive causes Receive to return when + processing finishes on all messages currently in progress, even if new messages are arriving. +- redis: + - Add redis/apiv1 client. +- storage: + - Add Reader.Attrs. + - Deprecate several Reader getter methods: please use Reader.Attrs for these instead. + - Add ObjectHandle.Bucket and ObjectHandle.Object methods. + +## v0.27.0 + +- bigquery: + - Allow modification of encryption configuration and partitioning options to a table via the Update call. + - Add a SchemaFromJSON function that converts a JSON table schema. +- bigtable: + - Restore cbt count functionality. +- containeranalysis: + - Add v1beta client. +- spanner: + - Fix a case where an iterator might not be closed correctly. +- storage: + - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount. + - Add a method to Reader that returns the parsed value of the Last-Modified header. + +## v0.26.0 + +- bigquery: + - Support filtering listed jobs by min/max creation time. + - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering). + - Include job creator email in Job struct. +- bigtable: + - Add `RowSampleFilter`. + - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters + must match the entire target string to succeed. Previously, the emulator was + succeeding on partial matches. + NOTE: As of this release, this change only affects the emulator when run + from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched + from `gcloud` will be updated in a subsequent `gcloud` release. +- dataproc: Add apiv1beta2 client. +- datastore: Save non-nil pointer fields on omitempty. +- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header. +- logging/logadmin: Support writer_identity and include_children. +- pubsub: + - Support labels on topics and subscriptions. + - Support message storage policy for topics. + - Use the distribution of ack times to determine when to extend ack deadlines. + The only user-visible effect of this change should be that programs that + call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub + Subscriber`. +- storage: + - Support predefined ACLs. + - Support additional ACL fields other than Entity and Role. + - Support bucket websites. + - Support bucket logging. + + +## v0.25.0 + +- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md) +- bigtable: + - cbt: Support a GC policy of "never". +- errorreporting: + - Support User. + - Close now calls Flush. + - Use OnError (previously ignored). + - Pass through the RPC error as-is to OnError. +- httpreplay: A tool for recording and replaying HTTP requests + (for the bigquery and storage clients in this repo). +- kms: v1 client added +- logging: add SourceLocation to Entry. +- storage: improve CRC checking on read. + +## v0.24.0 + +- bigquery: Support for the NUMERIC type. 
+- bigtable: + - cbt: Optionally specify columns for read/lookup + - Support instance-level administration. +- oslogin: New client for the OS Login API. +- pubsub: + - The package is now stable. There will be no further breaking changes. + - Internal changes to improve Subscription.Receive behavior. +- storage: Support updating bucket lifecycle config. +- spanner: Support struct-typed parameter bindings. +- texttospeech: New client for the Text-to-Speech API. + +## v0.23.0 + +- bigquery: Add DDL stats to query statistics. +- bigtable: + - cbt: Add cells-per-column limit for row lookup. + - cbt: Make it possible to combine read filters. +- dlp: v2beta2 client removed. Use the v2 client instead. +- firestore, spanner: Fix compilation errors due to protobuf changes. + +## v0.22.0 + +- bigtable: + - cbt: Support cells per column limit for row read. + - bttest: Correctly handle empty RowSet. + - Fix ReadModifyWrite operation in emulator. + - Fix API path in GetCluster. + +- bigquery: + - BEHAVIOR CHANGE: Retry on 503 status code. + - Add dataset.DeleteWithContents. + - Add SchemaUpdateOptions for query jobs. + - Add Timeline to QueryStatistics. + - Add more stats to ExplainQueryStage. + - Support Parquet data format. + +- datastore: + - Support omitempty for times. + +- dlp: + - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client, + which is now out of beta. + - Add v2 client. + +- firestore: + - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid. + +- iam: + - Support JWT signing via SignJwt callopt. + +- profiler: + - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done. + - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute. + - Avoid returning empty serial port output. + +- pubsub: + - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy. + - BEHAVIOR CHANGE: Don't backoff on EOF. + - pstest: Support Acknowledge and ModifyAckDeadline RPCs. + +- redis: + - Add v1 beta Redis client. + +- spanner: + - Support SessionLabels. + +- speech: + - Add api v1 beta1 client. + +- storage: + - BEHAVIOR CHANGE: Retry reads when retryable error occurs. + - Fix delete of object in requester-pays bucket. + - Support KMS integration. + +## v0.21.0 + +- bigquery: + - Add OpenCensus tracing. + +- firestore: + - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot + whose Exists method returns false. DocumentRef.Get and Transaction.Get + return the non-nil DocumentSnapshot in addition to a NotFound error. + **DocumentRef.GetAll and Transaction.GetAll return a non-nil + DocumentSnapshot instead of nil.** + - Add DocumentIterator.Stop. **Call Stop whenever you are done with a + DocumentIterator.** + - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime + notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. + - Canceling an RPC now always returns a grpc.Status with codes.Canceled. + +- spanner: + - Add `CommitTimestamp`, which supports inserting the commit timestamp of a + transaction into a column. + +## v0.20.0 + +- bigquery: Support SchemaUpdateOptions for load jobs. + +- bigtable: + - Add SampleRowKeys. + - cbt: Support union, intersection GCPolicy. + - Retry admin RPCS. + - Add trace spans to retries. + +- datastore: Add OpenCensus tracing. + +- firestore: + - Fix queries involving Null and NaN. + - Allow Timestamp protobuffers for time values. + +- logging: Add a WriteTimeout option. + +- spanner: Support Batch API. + +- storage: Add OpenCensus tracing. 
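+
+A minimal sketch of the v0.21.0 firestore changes above (assuming a `*firestore.Client`
+named `client`, plus the `google.golang.org/api/iterator` and gRPC `status`/`codes`
+packages): a missing document is reported through a non-nil snapshot whose Exists
+method returns false, and a DocumentIterator should always be stopped when you are
+done with it.
+
+```go
+func lookup(ctx context.Context, client *firestore.Client) error {
+	snap, err := client.Doc("users/alice").Get(ctx)
+	if status.Code(err) == codes.NotFound {
+		_ = snap.Exists() // false; snap itself is non-nil as of v0.21.0
+	} else if err != nil {
+		return err
+	}
+
+	it := client.Collection("users").Documents(ctx)
+	defer it.Stop() // call Stop whenever you are done with a DocumentIterator
+	for {
+		doc, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		_ = doc.Data() // use the document
+	}
+	return nil
+}
+```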
+ +## v0.19.0 + +- bigquery: + - Support customer-managed encryption keys. + +- bigtable: + - Improved emulator support. + - Support GetCluster. + +- datastore: + - Add general mutations. + - Support pointer struct fields. + - Support transaction options. + +- firestore: + - Add Transaction.GetAll. + - Support document cursors. + +- logging: + - Support concurrent RPCs to the service. + - Support per-entry resources. + +- profiler: + - Add config options to disable heap and thread profiling. + - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. + +- pubsub: + - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the + callback returns). + - Add SubscriptionInProject. + - Add OpenCensus instrumentation for streaming pull. + +- storage: + - Support CORS. + +## v0.18.0 + +- bigquery: + - Marked stable. + - Schema inference of nullable fields supported. + - Added TimePartitioning to QueryConfig. + +- firestore: Data provided to DocumentRef.Set with a Merge option can contain + Delete sentinels. + +- logging: Clients can accept parent resources other than projects. + +- pubsub: + - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome. + - Support updating more subscription metadata: AckDeadline, + RetainAckedMessages and RetentionDuration. + +- oslogin/apiv1beta: New client for the Cloud OS Login API. + +- rpcreplay: A package for recording and replaying gRPC traffic. + +- spanner: + - Add a ReadWithOptions that supports a row limit, as well as an index. + - Support query plan and execution statistics. + - Added [OpenCensus](http://opencensus.io) support. + +- storage: Clarify checksum validation for gzipped files (it is not validated + when the file is served uncompressed). + + +## v0.17.0 + +- firestore BREAKING CHANGES: + - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. + Change + `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` + to + `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` + + Change + `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` + to + `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` + - Rename MergePaths to Merge; require args to be FieldPaths + - A value stored as an integer can be read into a floating-point field, and vice versa. +- bigtable/cmd/cbt: + - Support deleting a column. + - Add regex option for row read. +- spanner: Mark stable. +- storage: + - Add Reader.ContentEncoding method. + - Fix handling of SignedURL headers. +- bigquery: + - If Uploader.Put is called with no rows, it returns nil without making a + call. + - Schema inference supports the "nullable" option in struct tags for + non-required fields. + - TimePartitioning supports "Field". + + +## v0.16.0 + +- Other bigquery changes: + - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). + - UseStandardSQL is deprecated; set UseLegacySQL to true if you need + Legacy SQL. + - Uploader.Put will generate a random insert ID if you do not provide one. + - Support time partitioning for load jobs. + - Support dry-run queries. + - A `Job` remembers its last retrieved status. + - Support retrieving job configuration. + - Support labels for jobs and tables. + - Support dataset access lists. + - Improve support for external data sources, including data from Bigtable and + Google Sheets, and tables with external data. + - Support updating a table's view configuration. + - Fix uploading civil times with nanoseconds. + +- storage: + - Support PubSub notifications. 
+ - Support Requester Pays buckets. + +- profiler: Support goroutine and mutex profile types. + +## v0.15.0 + +- firestore: beta release. See the + [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). + +- errorreporting: The existing package has been redesigned. + +- errors: This package has been removed. Use errorreporting. + + +## v0.14.0 + +- bigquery BREAKING CHANGES: + - Standard SQL is the default for queries and views. + - `Table.Create` takes `TableMetadata` as a second argument, instead of + options. + - `Dataset.Create` takes `DatasetMetadata` as a second argument. + - `DatasetMetadata` field `ID` renamed to `FullID` + - `TableMetadata` field `ID` renamed to `FullID` + +- Other bigquery changes: + - The client will append a random suffix to a provided job ID if you set + `AddJobIDSuffix` to true in a job config. + - Listing jobs is supported. + - Better retry logic. + +- vision, language, speech: clients are now stable + +- monitoring: client is now beta + +- profiler: + - Rename InstanceName to Instance, ZoneName to Zone + - Auto-detect service name and version on AppEngine. + +## v0.13.0 + +- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these + options to continue using Legacy SQL after the client switches its default + to Standard SQL. + +- bigquery: Support for updating dataset labels. + +- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other + than the client's. DatasetsInProject is no longer needed and is deprecated. + +- bigtable: Fail ListInstances when any zones fail. + +- spanner: support decoding of slices of basic types (e.g. []string, []int64, + etc.) + +- logging/logadmin: UpdateSink no longer creates a sink if it is missing + (actually a change to the underlying service, not the client) + +- profiler: Service and ServiceVersion replace Target in Config. + +## v0.12.0 + +- pubsub: Subscription.Receive now uses streaming pull. + +- pubsub: add Client.TopicInProject to access topics in a different project + than the client. + +- errors: renamed errorreporting. The errors package will be removed shortly. + +- datastore: improved retry behavior. + +- bigquery: support updates to dataset metadata, with etags. + +- bigquery: add etag support to Table.Update (BREAKING: etag argument added). + +- bigquery: generate all job IDs on the client. + +- storage: support bucket lifecycle configurations. + + +## v0.11.0 + +- Clients for spanner, pubsub and video are now in beta. + +- New client for DLP. + +- spanner: performance and testing improvements. + +- storage: requester-pays buckets are supported. + +- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. + +- pubsub: bug fixes and other minor improvements + +## v0.10.0 + +- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. + +- pubsub: Subscription.Receive now runs concurrently for higher throughput. + +- vision: cloud.google.com/go/vision is deprecated. Use +cloud.google.com/go/vision/apiv1 instead. + +- translation: now stable. + +- trace: several changes to the surface. See the link below. + +### Code changes required from v0.9.0 + +- pubsub: Replace + + ``` + sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) + ``` + + with + + ``` + sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ + PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, + }) + ``` + +- trace: traceGRPCServerInterceptor will be provided from *trace.Client. 
+Given an initialized `*trace.Client` named `tc`, instead of
+
+  ```
+  s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
+  ```
+
+  write
+
+  ```
+  s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
+  ```
+
+- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
+Instead of
+
+  ```
+  conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
+  ```
+
+  write
+
+  ```
+  conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
+  ```
+
+- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
+interceptor as a dial option as shown below when initializing Cloud package
+clients:
+
+  ```
+  c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
+  if err != nil {
+    ...
+  }
+  ```
+
+
+## v0.9.0
+
+- Breaking changes to some autogenerated clients.
+- rpcreplay package added.
+
+## v0.8.0
+
+- profiler package added.
+- storage:
+  - Retry Objects.Insert call.
+  - Add ProgressFunc to Writer.
+- pubsub: breaking changes:
+  - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
+  - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
+  - Message.Done replaced with Message.Ack and Message.Nack.
+
+## v0.7.0
+
+- Release of a client library for Spanner. See
+the
+[blog
+post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
+Note that although the Spanner service is beta, the Go client library is alpha.
+
+## v0.6.0
+
+- Beta release of BigQuery, DataStore, Logging and Storage. See the
+[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
+
+- bigquery:
+  - struct support. Read a row directly into a struct with
+`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
+You can also use field tags. See the [package documentation][cloud-bigquery-ref]
+for details.
+
+  - The `ValueList` type was removed. It is no longer necessary. Instead of
+    ```go
+    var v ValueList
+    ... it.Next(&v) ..
+    ```
+    use
+
+    ```go
+    var v []Value
+    ... it.Next(&v) ...
+    ```
+
+  - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
+    `ValueList` would append to the slice. Now each call resets the size to zero first.
+
+  - Schema inference will infer the SQL type BYTES for a struct field of
+    type []byte. Previously it inferred STRING.
+
+  - The types `uint`, `uint64` and `uintptr` are no longer supported in schema
+    inference. BigQuery's integer type is INT64, and those types may hold values
+    that are not correctly represented in a 64-bit signed integer.
+
+## v0.5.0
+
+- bigquery:
+  - The SQL types DATE, TIME and DATETIME are now supported. They correspond to
+    the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
+    package.
+  - Support for query parameters.
+  - Support deleting a dataset.
+  - Values from INTEGER columns will now be returned as int64, not int. This
+    will avoid errors arising from large values on 32-bit systems.
+- datastore:
+  - Nested Go structs encoded as Entity values, instead of a
+flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, e.g.
+ ```go + type State struct { + Cities []struct{ + Populations []int + } + } + ``` + See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for +more details. + - Contexts no longer hold namespaces; instead you must set a key's namespace + explicitly. Also, key functions have been changed and renamed. + - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: + ```go + q := datastore.NewQuery("Kind").Namespace("ns") + ``` + - All the fields of Key are exported. That means you can construct any Key with a struct literal: + ```go + k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} + ``` + - As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. + - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace + ```go + NewIncompleteKey(ctx, kind, parent) + ``` + with + ```go + IncompleteKey(kind, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace + ```go + NewKey(ctx, kind, name, 0, parent) + NewKey(ctx, kind, "", id, parent) + ``` + with + ```go + NameKey(kind, name, parent) + IDKey(kind, id, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. + - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. + - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for +more details. + +## v0.4.0 + +- bigquery: + -`NewGCSReference` is now a function, not a method on `Client`. + - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling + loading data into a table from a file or any `io.Reader`. + * Client.Table and Client.OpenTable have been removed. + Replace + ```go + client.OpenTable("project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table") + ``` + + * Client.CreateTable has been removed. + Replace + ```go + client.CreateTable(ctx, "project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table").Create(ctx) + ``` + + * Dataset.ListTables have been replaced with Dataset.Tables. + Replace + ```go + tables, err := ds.ListTables(ctx) + ``` + with + ```go + it := ds.Tables(ctx) + for { + table, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use table. + } + ``` + + * Client.Read has been replaced with Job.Read, Table.Read and Query.Read. + Replace + ```go + it, err := client.Read(ctx, job) + ``` + with + ```go + it, err := job.Read(ctx) + ``` + and similarly for reading from tables or queries. + + * The iterator returned from the Read methods is now named RowIterator. Its + behavior is closer to the other iterators in these libraries. It no longer + supports the Schema method; see the next item. + Replace + ```go + for it.Next(ctx) { + var vals ValueList + if err := it.Get(&vals); err != nil { + // TODO: Handle error. + } + // TODO: use vals. + } + if err := it.Err(); err != nil { + // TODO: Handle error. 
+    }
+    ```
+    with
+    ```go
+    for {
+      var vals ValueList
+      err := it.Next(&vals)
+      if err == iterator.Done {
+        break
+      }
+      if err != nil {
+        // TODO: Handle error.
+      }
+      // TODO: use vals.
+    }
+    ```
+    Instead of the `RecordsPerRequest(n)` option, write
+    ```go
+    it.PageInfo().MaxSize = n
+    ```
+    Instead of the `StartIndex(i)` option, write
+    ```go
+    it.StartIndex = i
+    ```
+
+  * ValueLoader.Load now takes a Schema in addition to a slice of Values.
+    Replace
+    ```go
+    func (vl *myValueLoader) Load(v []bigquery.Value)
+    ```
+    with
+    ```go
+    func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
+    ```
+
+
+  * Table.Patch is replaced by Table.Update.
+    Replace
+    ```go
+    p := table.Patch()
+    p.Description("new description")
+    metadata, err := p.Apply(ctx)
+    ```
+    with
+    ```go
+    metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
+      Description: "new description",
+    })
+    ```
+
+  * Client.Copy is replaced by separate methods for each of its four functions.
+    All options have been replaced by struct fields.
+
+  * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
+
+    Replace
+    ```go
+    client.Copy(ctx, table, gcsRef)
+    ```
+    with
+    ```go
+    table.LoaderFrom(gcsRef).Run(ctx)
+    ```
+    Instead of passing options to Copy, set fields on the Loader:
+    ```go
+    loader := table.LoaderFrom(gcsRef)
+    loader.WriteDisposition = bigquery.WriteTruncate
+    ```
+
+  * To extract data from a table into Google Cloud Storage, use
+    Table.ExtractorTo. Set fields on the returned Extractor instead of
+    passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, gcsRef, table)
+    ```
+    with
+    ```go
+    table.ExtractorTo(gcsRef).Run(ctx)
+    ```
+
+  * To copy data into a table from one or more other tables, use
+    Table.CopierFrom. Set fields on the returned Copier instead of passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, dstTable, srcTable)
+    ```
+    with
+    ```go
+    dstTable.CopierFrom(srcTable).Run(ctx)
+    ```
+
+  * To start a query job, create a Query and call its Run method. Set fields
+    on the query instead of passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, table, query)
+    ```
+    with
+    ```go
+    query.Run(ctx)
+    ```
+
+  * Table.NewUploader has been renamed to Table.Uploader. Instead of options,
+    configure an Uploader by setting its fields.
+    Replace
+    ```go
+    u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
+    ```
+    with
+    ```go
+    u := table.Uploader()
+    u.IgnoreUnknownValues = true
+    ```
+
+- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package
+`google.golang.org/api/iterator`.
+
+## v0.3.0
+
+- storage:
+  * AdminClient replaced by methods on Client.
+    Replace
+    ```go
+    adminClient.CreateBucket(ctx, bucketName, attrs)
+    ```
+    with
+    ```go
+    client.Bucket(bucketName).Create(ctx, projectID, attrs)
+    ```
+
+  * BucketHandle.List replaced by BucketHandle.Objects.
+    Replace
+    ```go
+    for query != nil {
+      objs, err := bucket.List(d.ctx, query)
+      if err != nil { ... }
+      query = objs.Next
+      for _, obj := range objs.Results {
+        fmt.Println(obj)
+      }
+    }
+    ```
+    with
+    ```go
+    iter := bucket.Objects(d.ctx, query)
+    for {
+      obj, err := iter.Next()
+      if err == iterator.Done {
+        break
+      }
+      if err != nil { ... }
+      fmt.Println(obj)
+    }
+    ```
+    (The `iterator` package is at `google.golang.org/api/iterator`.)
+
+    Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
+
+    Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
+
+
+  * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
+    Replace
+    ```go
+    attrs, err := src.CopyTo(ctx, dst, nil)
+    ```
+    with
+    ```go
+    attrs, err := dst.CopierFrom(src).Run(ctx)
+    ```
+
+    Replace
+    ```go
+    attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
+    ```
+    with
+    ```go
+    c := dst.CopierFrom(src)
+    c.ContentType = "text/html"
+    attrs, err := c.Run(ctx)
+    ```
+
+  * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
+    Replace
+    ```go
+    attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
+    ```
+    with
+    ```go
+    attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
+    ```
+
+  * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
+    Replace
+    ```go
+    attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
+    ```
+    with
+    ```go
+    attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
+    ```
+
+  * ObjectHandle.WithConditions replaced by ObjectHandle.If.
+    Replace
+    ```go
+    obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
+    ```
+    with
+    ```go
+    obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
+    ```
+
+    Replace
+    ```go
+    obj.WithConditions(storage.IfGenerationMatch(0))
+    ```
+    with
+    ```go
+    obj.If(storage.Conditions{DoesNotExist: true})
+    ```
+
+  * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
+
+- Package preview/logging deleted. Use logging instead.
+
+## v0.2.0
+
+- Logging client replaced with preview version (see below).
+
+- New clients for some of Google's Machine Learning APIs: Vision, Speech, and
+Natural Language.
+
+- Preview version of a new [Stackdriver Logging][cloud-logging] client in
+[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
+This client uses gRPC as its transport layer, and supports log reading, sinks
+and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
+
diff --git a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..8fd1bc9c
--- /dev/null
+++ b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md new file mode 100644 index 00000000..109ca5c4 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -0,0 +1,250 @@ +# Contributing + +1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose). + The issue will be used to discuss the bug or feature and should be created + before sending a CL. + +1. [Install Go](https://golang.org/dl/). + 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) + is in your `PATH`. + 1. Check it's working by running `go version`. + * If it doesn't work, check the install location, usually + `/usr/local/go`, is on your `PATH`. + +1. Sign one of the +[contributor license agreements](#contributor-license-agreements) below. + +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` + +1. Change into the checked out source: + `cd google-cloud-go` + +1. Fork the repo. + +1. Set your fork as a remote: + `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git` + +1. Make changes (see [Formatting](#formatting) and [Style](#style)), commit to + your fork. + + Commit messages should follow the + [Go project style](https://github.com/golang/go/wiki/CommitMessage). For example: + ``` + functions: add gophers codelab + ``` + +1. Send a pull request with your changes. + +1. A maintainer will review the pull request and make comments. + + Prefer adding additional commits over amending and force-pushing since it can + be difficult to follow code reviews when the commit history changes. + + Commits will be squashed when they're merged. + +## Integration Tests + +In addition to the unit tests, you may run the integration test suite. These +directions describe setting up your environment to run integration tests for +_all_ packages: note that many of these instructions may be redundant if you +intend only to run integration tests on a single package. + +#### GCP Setup + +To run the integrations tests, creation and configuration of two projects in +the Google Developers Console is required: one specifically for Firestore +integration tests, and another for all other integration tests. We'll refer to +these projects as "general project" and "Firestore project". + +After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) +for each project. Ensure the project-level **Owner** +[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to +each service account. During the creation of the service account, you should +download the JSON credential file for use later. 
+ +Next, ensure the following APIs are enabled in the general project: + +- BigQuery API +- BigQuery Data Transfer API +- Cloud Dataproc API +- Cloud Dataproc Control API Private +- Cloud Datastore API +- Cloud Firestore API +- Cloud Key Management Service (KMS) API +- Cloud Natural Language API +- Cloud OS Login API +- Cloud Pub/Sub API +- Cloud Resource Manager API +- Cloud Spanner API +- Cloud Speech API +- Cloud Translation API +- Cloud Video Intelligence API +- Cloud Vision API +- Compute Engine API +- Compute Engine Instance Group Manager API +- Container Registry API +- Firebase Rules API +- Google Cloud APIs +- Google Cloud Deployment Manager V2 API +- Google Cloud SQL +- Google Cloud Storage +- Google Cloud Storage JSON API +- Google Compute Engine Instance Group Updater API +- Google Compute Engine Instance Groups API +- Kubernetes Engine API +- Stackdriver Error Reporting API + +Next, create a Datastore database in the general project, and a Firestore +database in the Firestore project. + +Finally, in the general project, create an API key for the translate API: + +- Go to GCP Developer Console. +- Navigate to APIs & Services > Credentials. +- Click Create Credentials > API Key. +- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below. + +#### Local Setup + +Once the two projects are created and configured, set the following environment +variables: + +- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g. +bamboo-shift-455) for the general project. +- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general +project's service account. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID +(e.g. doorway-cliff-677) for the Firestore project. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the +Firestore project's service account. +- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests, +in the form +"projects/P/locations/L/keyRings/R". The creation of this is described below. +- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API. +- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone. + +Install the [gcloud command-line tool][gcloudcli] to your machine and use it to +create some resources used in integration tests. + +From the project's root directory: + +``` sh +# Sets the default project in your env. +$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Authenticates the gcloud tool with your account. +$ gcloud auth login + +# Create the indexes used in the datastore integration tests. +$ gcloud datastore indexes create datastore/testdata/index.yaml + +# Creates a Google Cloud storage bucket with the same name as your test project, +# and with the Stackdriver Logging service account as owner, for the sink +# integration tests in logging. +$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID +$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Creates a PubSub topic for integration tests of storage notifications. +$ gcloud beta pubsub topics create go-storage-notification-test +# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user +# "service-@gs-project-accounts.iam.gserviceaccount.com" +# as a publisher to that topic. + +# Creates a Spanner instance for the spanner integration tests. 
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to +# delete the instance after testing with 'gcloud beta spanner instances delete'. + +$ export MY_KEYRING=some-keyring-name +$ export MY_LOCATION=global +# Creates a KMS keyring, in the same location as the default location for your +# project's buckets. +$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION +# Creates two keys in the keyring, named key1 and key2. +$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. +$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING +# Authorizes Google Cloud Storage to encrypt and decrypt using key1. +gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 +``` + +#### Running + +Once you've done the necessary setup, you can run the integration tests by +running: + +``` sh +$ go test -v cloud.google.com/go/... +``` + +#### Replay + +Some packages can record the RPCs during integration tests to a file for +subsequent replay. To record, pass the `-record` flag to `go test`. The +recording will be saved to the _package_`.replay` file. To replay integration +tests from a saved recording, the replay file must be present, the `-short` +flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY` +environment variable must have a non-empty value. + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your +work**, then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. 
+By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 00000000..b115812c --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,178 @@ +# Google Cloud Client Libraries for Go + +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go) + +Go packages for [Google Cloud Platform](https://cloud.google.com) services. + +``` go +import "cloud.google.com/go" +``` + +To install the packages on your system, *do not clone the repo*. Instead: + +1. Change to your project directory: + + ``` + cd /my/cloud/project + ``` +1. Get the package you want to use. Some products have their own module, so it's + best to `go get` the package(s) you want to use: + + ``` + $ go get cloud.google.com/go/firestore # Replace with the package you want to use. + ``` + +**NOTE:** Some of these packages are under development, and may occasionally +make backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). 
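+
+As a quick smoke test after `go get` (a hedged sketch only; it assumes the firestore
+package fetched above and a project ID of your own):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"cloud.google.com/go/firestore"
+)
+
+func main() {
+	ctx := context.Background()
+	// NewClient uses Application Default Credentials; see Authorization below.
+	client, err := firestore.NewClient(ctx, "your-project-id")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	if _, err := client.Collection("greetings").Doc("hello").Set(ctx, map[string]interface{}{
+		"text": "Hello, Cloud Firestore!",
+	}); err != nil {
+		log.Fatal(err)
+	}
+}
+```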
+ +## Supported APIs + +Google API | Status | Package +------------------------------------------------|--------------|----------------------------------------------------------- +[Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta) +[Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1) +[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) +[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) +[Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) +[Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) +[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) +[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) +[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) +[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore) +[Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2) +[Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2) +[Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2) +[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting) +[Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore) +[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam) +[IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1) +[IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2) +[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1) +[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1) +[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging) +[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1) +[Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) +[OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) +[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) +[Phishing Protection][cloud-phishingprotection] | alpha | 
[`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) +[reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) +[Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) +[Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1) +[Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1) +[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner) +[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1) +[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage) +[Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1) +[Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1) +[Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2) +[Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate) +[Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2) +[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1) +[Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1) + +> **Alpha status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. There may be minor backwards-incompatible +> changes where necessary. +> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go) + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. + +## Authorization + +By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +[snip]:# (auth) +```go +client, err := storage.NewClient(ctx) +``` + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile) +to the `NewClient` function of the desired package. 
For example: + +[snip]:# (auth-JSON) +```go +client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) +``` + +You can exert more control over authorization by using the +[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. Then pass +[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) +to the `NewClient` function: +[snip]:# (auth-ts) +```go +tokenSource := ... +client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +``` + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. + +[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory +[cloud-automl]: https://cloud.google.com/automl +[cloud-build]: https://cloud.google.com/cloud-build/ +[cloud-bigquery]: https://cloud.google.com/bigquery/ +[cloud-bigtable]: https://cloud.google.com/bigtable/ +[cloud-container]: https://cloud.google.com/containers/ +[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis +[cloud-dataproc]: https://cloud.google.com/dataproc/ +[cloud-datastore]: https://cloud.google.com/datastore/ +[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/ +[cloud-debugger]: https://cloud.google.com/debugger/ +[cloud-dlp]: https://cloud.google.com/dlp/ +[cloud-errors]: https://cloud.google.com/error-reporting/ +[cloud-firestore]: https://cloud.google.com/firestore/ +[cloud-iam]: https://cloud.google.com/iam/ +[cloud-iot]: https://cloud.google.com/iot-core/ +[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts +[cloud-kms]: https://cloud.google.com/kms/ +[cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-storage]: https://cloud.google.com/storage/ +[cloud-language]: https://cloud.google.com/natural-language +[cloud-logging]: https://cloud.google.com/logging/ +[cloud-natural-language]: https://cloud.google.com/natural-language/ +[cloud-memorystore]: https://cloud.google.com/memorystore/ +[cloud-monitoring]: https://cloud.google.com/monitoring/ +[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest +[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/ +[cloud-securitycenter]: https://cloud.google.com/security-command-center/ +[cloud-scheduler]: https://cloud.google.com/scheduler +[cloud-spanner]: https://cloud.google.com/spanner/ +[cloud-speech]: https://cloud.google.com/speech +[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ +[cloud-tasks]: https://cloud.google.com/tasks/ +[cloud-texttospeech]: https://cloud.google.com/texttospeech/ +[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ +[cloud-trace]: https://cloud.google.com/trace/ +[cloud-translate]: https://cloud.google.com/translate +[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/ +[cloud-recommender]: https://cloud.google.com/recommendations/ +[cloud-video]: https://cloud.google.com/video-intelligence/ +[cloud-vision]: https://cloud.google.com/vision +[cloud-webrisk]: 
https://cloud.google.com/web-risk/ diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md new file mode 100644 index 00000000..c8c7f933 --- /dev/null +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -0,0 +1,128 @@ +# Setup from scratch + +1. [Install Go](https://golang.org/dl/). + 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) + is in your `PATH`. + 1. Check it's working by running `go version`. + * If it doesn't work, check the install location, usually + `/usr/local/go`, is on your `PATH`. + +1. Sign one of the +[contributor license agreements](#contributor-license-agreements) below. + +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` + +1. Change into the checked out source: + `cd google-cloud-go` + +1. Fork the repo and add your fork as a secondary remote (this is necessary in + order to create PRs). + +# Which module to release? + +The Go client libraries have several modules. Each module does not strictly +correspond to a single library - they correspond to trees of directories. If a +file needs to be released, you must release the closest ancestor module. + +To see all modules: + +``` +$ cat `find . -name go.mod` | grep module +module cloud.google.com/go +module cloud.google.com/go/bigtable +module cloud.google.com/go/firestore +module cloud.google.com/go/bigquery +module cloud.google.com/go/storage +module cloud.google.com/go/datastore +module cloud.google.com/go/pubsub +module cloud.google.com/go/spanner +module cloud.google.com/go/logging +``` + +The `cloud.google.com/go` is the repository root module. Each other module is +a submodule. + +So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest +ancestor module is `cloud.google.com/go/bigtable` - so you should release a new +version of the `cloud.google.com/go/bigtable` submodule. + +If you need to release a change in `asset/apiv1/asset_client.go`, the closest +ancestor module is `cloud.google.com/go` - so you should release a new version +of the `cloud.google.com/go` repository root module. Note: releasing +`cloud.google.com/go` has no impact on any of the submodules, and vice-versa. +They are released entirely independently. + +# Test failures + +If there are any test failures in the Kokoro build, releases are blocked until +the failures have been resolved. + +# How to release `cloud.google.com/go` + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any + failures in the most recent build, address them before proceeding with the + release. +1. Navigate to `~/code/gocloud/` and switch to master. +1. `git pull` +1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. + The current latest tag `$CV` is the largest tag. It should look something + like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a + specific library, not the module root). We'll call the current version `$CV` + and the new version `$NV`. +1. On master, run `git log $CV...` to list all the changes since the last + release. NOTE: You must manually visually parse out changes to submodules [1] + (the `git log` is going to show you things in submodules, which are not going + to be part of your release). +1. Edit `CHANGES.md` to include a summary of the changes. +1. `cd internal/version && go generate && cd -` +1. Commit the changes, push to your fork, and create a PR. +1. Wait for the PR to be reviewed and merged. 
Once it's merged, and without + merging any other PRs in the meantime: + a. Switch to master. + b. `git pull` + c. Tag the repo with the next version: `git tag $NV`. + d. Push the tag to origin: + `git push origin $NV` +2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) + with the new release, copying the contents of `CHANGES.md`. + +# How to release a submodule + +We have several submodules, including `cloud.google.com/go/logging`, +`cloud.google.com/go/datastore`, and so on. + +To release a submodule: + +(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any + failures in the most recent build, address them before proceeding with the + release. (This applies even if the failures are in a different submodule from the one + being released.) +1. Navigate to `~/code/gocloud/` and switch to master. +1. `git pull` +1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all + existing releases. The current latest tag `$CV` is the largest tag. It + should look something like `datastore/vX.Y.Z`. We'll call the current version + `$CV` and the new version `$NV`. +1. On master, run `git log $CV.. -- datastore/` to list all the changes to the + submodule directory since the last release. +1. Edit `datastore/CHANGES.md` to include a summary of the changes. +1. `cd internal/version && go generate && cd -` +1. Commit the changes, push to your fork, and create a PR. +1. Wait for the PR to be reviewed and merged. Once it's merged, and without + merging any other PRs in the meantime: + a. Switch to master. + b. `git pull` + c. Tag the repo with the next version: `git tag $NV`. + d. Push the tag to origin: + `git push origin $NV` +1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) + with the new release, copying the contents of `datastore/CHANGES.md`. + +# Appendix + +1: This should get better as submodule tooling matures. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 4ff4e2f1..6b13424f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -61,25 +61,14 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) +var defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, +}} // NotDefinedError is returned when requested metadata is not defined. 
// @@ -151,7 +140,7 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.LookupHost("metadata.google.internal") + addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal") if err != nil || len(addrs) == 0 { resc <- false return @@ -206,10 +195,9 @@ func systemInfoSuggestsGCE() bool { return name == "Google" || name == "Google Compute Engine" } -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). +// Subscribe calls Client.Subscribe on the default client. func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) + return defaultClient.Subscribe(suffix, fn) } // Get calls Client.Get on the default client. @@ -280,9 +268,14 @@ type Client struct { hc *http.Client } -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. +// NewClient returns a Client that can be used to fetch metadata. +// Returns the client that uses the specified http.Client for HTTP requests. +// If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { + if c == nil { + return defaultClient + } + return &Client{hc: c} } @@ -304,7 +297,10 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { host = metadataIP } u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return "", "", err + } req.Header.Set("Metadata-Flavor", "Google") req.Header.Set("User-Agent", userAgent) res, err := c.hc.Do(req) @@ -407,11 +403,7 @@ func (c *Client) InstanceTags() ([]string, error) { // InstanceName returns the current VM's instance ID string. func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil + return c.getTrimmed("instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go new file mode 100644 index 00000000..237d8456 --- /dev/null +++ b/vendor/cloud.google.com/go/doc.go @@ -0,0 +1,100 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package cloud is the root of the packages used to access Google Cloud +Services. See https://godoc.org/cloud.google.com/go for a full list +of sub-packages. + + +Client Options + +All clients in sub-packages are configurable via client options. These options are +described here: https://godoc.org/google.golang.org/api/option. + + +Authentication and Authorization + +All the clients in sub-packages support authentication via Google Application Default +Credentials (see https://cloud.google.com/docs/authentication/production), or +by providing a JSON key file for a Service Account. See the authentication examples +in this package for details. 
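Editorial aside (not part of the vendored file): the Authentication and Authorization paragraph above describes the Application Default Credentials flow and the JSON key-file alternative. A minimal, hedged sketch of both follows; the key-file path is hypothetical and `storage` is just one sub-package picked for illustration.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// With no options, the client falls back to Application Default Credentials.
	adcClient, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer adcClient.Close()

	// Alternatively, authenticate with a service-account JSON key file
	// (the path below is a placeholder).
	keyClient, err := storage.NewClient(ctx, option.WithCredentialsFile("/path/to/key.json"))
	if err != nil {
		log.Fatal(err)
	}
	defer keyClient.Close()
}
```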
+ + +Timeouts and Cancellation + +By default, all requests in sub-packages will run indefinitely, retrying on transient +errors when correctness allows. To set timeouts or arrange for cancellation, use +contexts. See the examples for details. + +Do not attempt to control the initial connection (dialing) of a service by setting a +timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts +would be ineffective and would only interfere with credential refreshing, which uses +the same context. + + +Connection Pooling + +Connection pooling differs in clients based on their transport. Cloud +clients either rely on HTTP or gRPC transports to communicate +with Google Cloud. + +Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the +underlying HTTP transport to cache connections for later re-use. These are cached to +the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in +http.DefaultTransport. + +For gRPC clients (all others in this repo), connection pooling is configurable. Users +of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client +option to NewClient calls. This configures the underlying gRPC connections to be +pooled and addressed in a round robin fashion. + + +Using the Libraries with Docker + +Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to +hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 +for more information. + + +Debugging + +To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See +https://godoc.org/google.golang.org/grpc/grpclog for more information. + +For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". + + +Client Stability + +Clients in this repository are considered alpha or beta unless otherwise +marked as stable in the README.md. Semver is not used to communicate stability +of clients. + +Alpha and beta clients may change or go away without notice. + +Clients marked stable will maintain compatibility with future versions for as +long as we can reasonably sustain. Incompatible changes might be made in some +situations, including: + +- Security bugs may prompt backwards-incompatible changes. + +- Situations in which components are no longer feasible to maintain without +making breaking changes, including removal. + +- Parts of the client surface may be outright unstable and subject to change. +These parts of the surface will be labeled with the note, "It is EXPERIMENTAL +and subject to change or removal without notice." 
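Editorial aside (not part of the vendored file): tying back to the Timeouts and Cancellation and Connection Pooling guidance above, a hedged sketch of putting the timeout on the individual call rather than on the context passed to NewClient, and of requesting a gRPC connection pool. The project ID, topic name, and pool size are placeholders, and `pubsub` is just one gRPC-based sub-package chosen for illustration.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/option"
)

func main() {
	// No timeout on the dialing context, per the guidance above.
	ctx := context.Background()

	// A pool size of 4 is arbitrary; WithGRPCConnectionPool applies only to gRPC clients.
	client, err := pubsub.NewClient(ctx, "my-project", option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Apply the timeout to the individual operation instead.
	callCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	result := client.Topic("my-topic").Publish(callCtx, &pubsub.Message{Data: []byte("hello")})
	if _, err := result.Get(callCtx); err != nil {
		log.Printf("publish failed: %v", err)
	}
}
```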
+*/ +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod new file mode 100644 index 00000000..5bdc67a9 --- /dev/null +++ b/vendor/cloud.google.com/go/go.mod @@ -0,0 +1,24 @@ +module cloud.google.com/go + +go 1.11 + +require ( + cloud.google.com/go/storage v1.10.0 + github.com/golang/mock v1.4.3 + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.5.0 + github.com/google/martian/v3 v3.0.0 + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 + github.com/googleapis/gax-go/v2 v2.0.5 + github.com/jstemmer/go-junit-report v0.9.1 + go.opencensus.io v0.22.4 + golang.org/x/lint v0.0.0-20200302205851-738671d3881b + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/text v0.3.3 + golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed + google.golang.org/api v0.29.0 + google.golang.org/genproto v0.0.0-20200711021454-869866162049 + google.golang.org/grpc v1.30.0 + google.golang.org/protobuf v1.25.0 // indirect +) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum new file mode 100644 index 00000000..3736901a --- /dev/null +++ b/vendor/cloud.google.com/go/go.sum @@ -0,0 +1,484 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= 
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0 h1:86K1Gel7BQ9/WmNWn7dTKMvTLFzwtBe5FNqYbi9X35g= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= 
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 
h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA= +golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25 h1:OKbAoGs4fGM5cPLlVQLZGYkFC8OnOfgo6tt0Smf9XhM= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed h1:+qzWo37K31KxduIYaBeMqJ8MUOyTayOQKpH9aDPLMSY= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 
h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIoU2uUvrMVl+H26359loFFUleSMXFo= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= +google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf 
v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go index 5232cb67..0a06ea2e 100644 --- a/vendor/cloud.google.com/go/iam/iam.go +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -38,6 +38,7 @@ type client interface 
{ Get(ctx context.Context, resource string) (*pb.Policy, error) Set(ctx context.Context, resource string, p *pb.Policy) error Test(ctx context.Context, resource string, perms []string) ([]string, error) + GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) } // grpcClient implements client for the standard gRPC-based IAMPolicy service. @@ -57,13 +58,22 @@ var withRetry = gax.WithRetry(func() gax.Retryer { }) func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + return g.GetWithVersion(ctx, resource, 1) +} + +func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) { var proto *pb.Policy md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) ctx = insertMetadata(ctx, md) err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { var err error - proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{ + Resource: resource, + Options: &pb.GetPolicyOptions{ + RequestedPolicyVersion: requestedPolicyVersion, + }, + }) return err }, withRetry) if err != nil { @@ -110,11 +120,18 @@ type Handle struct { resource string } +// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions). +type Handle3 struct { + c client + resource string + version int32 +} + // InternalNewHandle is for use by the Google Cloud Libraries only. // // InternalNewHandle returns a Handle for resource. // The conn parameter refers to a server that must support the IAMPolicy service. -func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { +func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle { return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) } @@ -137,6 +154,17 @@ func InternalNewHandleClient(c client, resource string) *Handle { } } +// V3 returns a Handle3, which is like Handle except it sets +// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3 +// when storing a policy. +func (h *Handle) V3() *Handle3 { + return &Handle3{ + c: h.c, + resource: h.resource, + version: 3, + } +} + // Policy retrieves the IAM policy for the resource. func (h *Handle) Policy(ctx context.Context) (*Policy, error) { proto, err := h.c.Get(ctx, h.resource) @@ -313,3 +341,47 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { } return metadata.NewOutgoingContext(ctx, out) } + +// A Policy3 is a list of Bindings representing roles granted to members. +// +// The zero Policy3 is a valid policy with no bindings. +// +// It is similar to a Policy, except a Policy3 provides direct access to the +// list of Bindings. +// +// The policy version is always set to 3. +type Policy3 struct { + etag []byte + Bindings []*pb.Binding +} + +// Policy retrieves the IAM policy for the resource. +// +// requestedPolicyVersion is always set to 3. +func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) { + proto, err := h.c.GetWithVersion(ctx, h.resource, h.version) + if err != nil { + return nil, err + } + return &Policy3{ + Bindings: proto.Bindings, + etag: proto.Etag, + }, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. 
+// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error { + return h.c.Set(ctx, h.resource, &pb.Policy{ + Bindings: policy.Bindings, + Etag: policy.etag, + Version: h.version, + }) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json new file mode 100644 index 00000000..77368c01 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -0,0 +1,770 @@ +{ + "cloud.google.com/go/asset/apiv1": { + "distribution_name": "cloud.google.com/go/asset/apiv1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/asset/apiv1beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/asset/apiv1p2beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p2beta1", + "release_level": "beta" + }, + "cloud.google.com/go/asset/apiv1p5beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1", + "release_level": "beta" + }, + "cloud.google.com/go/automl/apiv1": { + "distribution_name": "cloud.google.com/go/automl/apiv1", + "description": "Cloud AutoML API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/automl/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/automl/apiv1beta1": { + "distribution_name": "cloud.google.com/go/automl/apiv1beta1", + "description": "Cloud AutoML API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/automl/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery": { + "distribution_name": "cloud.google.com/go/bigquery", + "description": "BigQuery", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/connection/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", + "description": "BigQuery Connection API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/connection/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", + "description": "BigQuery Connection API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery/datatransfer/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", + "description": "BigQuery Data Transfer API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/datatransfer/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/reservation/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", + "description": "BigQuery Reservation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/reservation/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/reservation/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1", + "description": "BigQuery Reservation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/reservation/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery/storage/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/storage/apiv1alpha2": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha2", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1alpha2", + "release_level": "alpha" + }, + "cloud.google.com/go/bigquery/storage/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery/storage/apiv1beta2": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/bigtable": { + "distribution_name": "cloud.google.com/go/bigtable", + "description": "Cloud BigTable", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigtable", + "release_level": "ga" + }, + "cloud.google.com/go/billing/apiv1": { + "distribution_name": "cloud.google.com/go/billing/apiv1", + "description": "Cloud Billing API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/cloudbuild/apiv1/v2": { + "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", + "description": "Cloud Build API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1/v2", + "release_level": "ga" + }, + "cloud.google.com/go/cloudtasks/apiv2": { + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", + "description": "Cloud Tasks API", + "language": 
"Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/cloudtasks/apiv2beta2": { + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", + "description": "Cloud Tasks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2beta2", + "release_level": "beta" + }, + "cloud.google.com/go/cloudtasks/apiv2beta3": { + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", + "description": "Cloud Tasks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2beta3", + "release_level": "beta" + }, + "cloud.google.com/go/container/apiv1": { + "distribution_name": "cloud.google.com/go/container/apiv1", + "description": "Kubernetes Engine API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/container/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/containeranalysis/apiv1beta1": { + "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", + "description": "Container Analysis API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/datacatalog/apiv1": { + "distribution_name": "cloud.google.com/go/datacatalog/apiv1", + "description": "Google Cloud Data Catalog API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/datacatalog/apiv1beta1": { + "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", + "description": "Google Cloud Data Catalog API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/dataproc/apiv1": { + "distribution_name": "cloud.google.com/go/dataproc/apiv1", + "description": "Cloud Dataproc API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/dataproc/apiv1beta2": { + "distribution_name": "cloud.google.com/go/dataproc/apiv1beta2", + "description": "Cloud Dataproc API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/datastore": { + "distribution_name": "cloud.google.com/go/datastore", + "description": "Cloud Datastore", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore", + "release_level": "ga" + }, + "cloud.google.com/go/datastore/admin/apiv1": { + "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", + "description": "Cloud Datastore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore/admin/apiv1", + "release_level": "alpha" + }, + "cloud.google.com/go/debugger/apiv2": { + "distribution_name": "cloud.google.com/go/debugger/apiv2", + "description": "Stackdriver Debugger API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/debugger/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/dialogflow/apiv2": { + "distribution_name": "cloud.google.com/go/dialogflow/apiv2", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/dlp/apiv2": { + "distribution_name": "cloud.google.com/go/dlp/apiv2", + "description": "Cloud Data Loss Prevention (DLP) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/errorreporting": { + "distribution_name": "cloud.google.com/go/errorreporting", + "description": "Stackdriver Error Reporting API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting", + "release_level": "beta" + }, + "cloud.google.com/go/errorreporting/apiv1beta1": { + "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", + "description": "Stackdriver Error Reporting API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/firestore": { + "distribution_name": "cloud.google.com/go/firestore", + "description": "Cloud Firestore API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore", + "release_level": "ga" + }, + "cloud.google.com/go/firestore/apiv1": { + "distribution_name": "cloud.google.com/go/firestore/apiv1", + "description": "Cloud Firestore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/firestore/apiv1/admin": { + "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", + "description": "Google Cloud Firestore Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1/admin", + "release_level": "ga" + }, + "cloud.google.com/go/gaming/apiv1beta": { + "distribution_name": "cloud.google.com/go/gaming/apiv1beta", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/iam": { + "distribution_name": "cloud.google.com/go/iam", + "description": "Cloud IAM", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/iam", + "release_level": "ga" + }, + "cloud.google.com/go/iam/credentials/apiv1": { + "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", + "description": "IAM Service Account Credentials API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/iam/credentials/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/iot/apiv1": { + "distribution_name": "cloud.google.com/go/iot/apiv1", + "description": "Cloud IoT API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/iot/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/kms/apiv1": { + "distribution_name": "cloud.google.com/go/kms/apiv1", + "description": "Cloud 
Key Management Service (KMS) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/kms/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/language/apiv1": { + "distribution_name": "cloud.google.com/go/language/apiv1", + "description": "Cloud Natural Language API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/language/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/language/apiv1beta2": { + "distribution_name": "cloud.google.com/go/language/apiv1beta2", + "description": "Cloud Natural Language API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/language/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/logging": { + "distribution_name": "cloud.google.com/go/logging", + "description": "Stackdriver Logging API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging", + "release_level": "ga" + }, + "cloud.google.com/go/logging/apiv2": { + "distribution_name": "cloud.google.com/go/logging/apiv2", + "description": "Cloud Logging API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/longrunning/autogen": { + "distribution_name": "cloud.google.com/go/longrunning/autogen", + "description": "Long Running Operations API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/longrunning/autogen", + "release_level": "alpha" + }, + "cloud.google.com/go/memcache/apiv1beta2": { + "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", + "description": "Cloud Memorystore for Memcached API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/monitoring/apiv3/v2": { + "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", + "description": "Cloud Monitoring API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2", + "release_level": "ga" + }, + "cloud.google.com/go/monitoring/dashboard/apiv1": { + "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/osconfig/agentendpoint/apiv1": { + "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", + "description": "Cloud OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { + "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", + "description": "Cloud OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/osconfig/apiv1": { + "distribution_name": "cloud.google.com/go/osconfig/apiv1", + "description": "Cloud OS Config API", + "language": 
"Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/osconfig/apiv1beta": { + "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", + "description": "Cloud OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/oslogin/apiv1": { + "distribution_name": "cloud.google.com/go/oslogin/apiv1", + "description": "Cloud OS Login API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/oslogin/apiv1beta": { + "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", + "description": "Cloud OS Login API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/phishingprotection/apiv1beta1": { + "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", + "description": "Phishing Protection API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/policytroubleshooter/apiv1": { + "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", + "description": "Policy Troubleshooter API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/policytroubleshooter/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/pubsub": { + "distribution_name": "cloud.google.com/go/pubsub", + "description": "Cloud PubSub", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub", + "release_level": "ga" + }, + "cloud.google.com/go/pubsub/apiv1": { + "distribution_name": "cloud.google.com/go/pubsub/apiv1", + "description": "Cloud Pub/Sub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/recaptchaenterprise/apiv1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", + "description": "reCAPTCHA Enterprise API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "description": "reCAPTCHA Enterprise API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/recommender/apiv1": { + "distribution_name": "cloud.google.com/go/recommender/apiv1", + "description": "Recommender API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommender/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/recommender/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", + "description": "Recommender API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/redis/apiv1": { + "distribution_name": "cloud.google.com/go/redis/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/redis/apiv1beta1": { + "distribution_name": "cloud.google.com/go/redis/apiv1beta1", + "description": "Google Cloud Memorystore for Redis API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/rpcreplay": { + "distribution_name": "cloud.google.com/go/rpcreplay", + "description": "RPC Replay", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/rpcreplay", + "release_level": "ga" + }, + "cloud.google.com/go/scheduler/apiv1": { + "distribution_name": "cloud.google.com/go/scheduler/apiv1", + "description": "Cloud Scheduler API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/scheduler/apiv1beta1": { + "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", + "description": "Cloud Scheduler API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/secretmanager/apiv1": { + "distribution_name": "cloud.google.com/go/secretmanager/apiv1", + "description": "Secret Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/secretmanager/apiv1beta1": { + "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", + "description": "Secret Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/securitycenter/apiv1": { + "distribution_name": "cloud.google.com/go/securitycenter/apiv1", + "description": "Security Command Center API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/securitycenter/apiv1beta1": { + "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", + "description": "Cloud Security Command Center API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/securitycenter/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", + "description": "Security Command Center API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1p1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/securitycenter/settings/apiv1beta1": { + "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", + "description": "Cloud Security Command Center API", + "language": "Go", + "client_library_type": 
"generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/settings/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/servicedirectory/apiv1beta1": { + "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", + "description": "Service Directory API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/spanner": { + "distribution_name": "cloud.google.com/go/spanner", + "description": "Cloud Spanner", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner", + "release_level": "ga" + }, + "cloud.google.com/go/spanner/admin/database/apiv1": { + "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", + "description": "Cloud Spanner Database Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/admin/database/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/spanner/admin/instance/apiv1": { + "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", + "description": "Cloud Spanner Instance Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/admin/instance/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/spanner/apiv1": { + "distribution_name": "cloud.google.com/go/spanner/apiv1", + "description": "Cloud Spanner API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/speech/apiv1": { + "distribution_name": "cloud.google.com/go/speech/apiv1", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/speech/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/speech/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/speech/apiv1p1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/storage": { + "distribution_name": "cloud.google.com/go/storage", + "description": "Cloud Storage (GCS)", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/storage", + "release_level": "ga" + }, + "cloud.google.com/go/talent/apiv4beta1": { + "distribution_name": "cloud.google.com/go/talent/apiv4beta1", + "description": "Cloud Talent Solution API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1", + "release_level": "beta" + }, + "cloud.google.com/go/texttospeech/apiv1": { + "distribution_name": "cloud.google.com/go/texttospeech/apiv1", + "description": "Cloud Text-to-Speech API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/trace": { + "distribution_name": "cloud.google.com/go/trace", + "description": "Stackdriver Trace", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace", + "release_level": 
"ga" + }, + "cloud.google.com/go/trace/apiv1": { + "distribution_name": "cloud.google.com/go/trace/apiv1", + "description": "Stackdriver Trace API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/trace/apiv2": { + "distribution_name": "cloud.google.com/go/trace/apiv2", + "description": "Stackdriver Trace API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/translate/apiv3": { + "distribution_name": "cloud.google.com/go/translate/apiv3", + "description": "Cloud Translation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/translate/apiv3", + "release_level": "ga" + }, + "cloud.google.com/go/videointelligence/apiv1": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1", + "description": "Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/videointelligence/apiv1beta2": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", + "description": "Google Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/vision/apiv1": { + "distribution_name": "cloud.google.com/go/vision/apiv1", + "description": "Cloud Vision API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/vision/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/vision/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", + "description": "Cloud Vision API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/vision/apiv1p1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/webrisk/apiv1": { + "distribution_name": "cloud.google.com/go/webrisk/apiv1", + "description": "Web Risk API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/webrisk/apiv1beta1": { + "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", + "description": "Web Risk API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1", + "release_level": "beta" + } +} diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md new file mode 100644 index 00000000..8857c8f6 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/README.md @@ -0,0 +1,18 @@ +# Internal + +This directory contains internal code for cloud.google.com/go packages. + +## .repo-metadata-full.json + +`.repo-metadata-full.json` contains metadata about the packages in this repo. It +is generated by `internal/gapicgen/generator`. It's processed by external tools +to build lists of all of the packages. + +Don't make breaking changes to the format without consulting with the external +tools. 
+ +One day, we may want to create individual `.repo-metadata.json` files next to +each package, which is the pattern followed by some other languages. External +tools would then talk to pkg.go.dev or some other service to get the overall +list of packages and use the `.repo-metadata.json` files to get the additional +metadata required. For now, `.repo-metadata-full.json` includes everything. \ No newline at end of file diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index d291921b..3328019a 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20190802" +const Repo = "20200706" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md new file mode 100644 index 00000000..f6d57be5 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -0,0 +1,83 @@ +# Changes + +## v1.10.0 +- Bump dependency on google.golang.org/api to capture changes to retry logic + which will make retries on writes more resilient. +- Improve documentation for Writer.ChunkSize. +- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. + +## v1.9.0 +- Add retry for transient network errors on most operations (with the exception + of writes). +- Bump dependency for google.golang.org/api to capture a change in the default + HTTP transport which will improve performance for reads under heavy load. +- Add CRC32C checksum validation option to Composer. + +## v1.8.0 +- Add support for V4 signed post policies. + +## v1.7.0 +- V4 signed URL support: + - Add support for bucket-bound domains and virtual hosted style URLs. + - Add support for query parameters in the signature. + - Fix text encoding to align with standards. +- Add the object name to query parameters for write calls. +- Fix retry behavior when reading files with Content-Encoding gzip. +- Fix response header in reader. +- New code examples: + - Error handling for `ObjectHandle` preconditions. + - Existence checks for buckets and objects. + +## v1.6.0 + +- Updated option handling: + - Don't drop custom scopes (#1756) + - Don't drop port in provided endpoint (#1737) + +## v1.5.0 + +- Honor WithEndpoint client option for reads as well as writes. +- Add archive storage class to docs. +- Make fixes to storage benchwrapper. + +## v1.4.0 + +- When listing objects in a bucket, allow callers to specify which attributes + are queried. This allows for performance optimization. + +## v1.3.0 + +- Use `storage.googleapis.com/storage/v1` by default for GCS requests + instead of `www.googleapis.com/storage/v1`. + +## v1.2.1 + +- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not + being sent in all cases. + +## v1.2.0 + +- Add support for UniformBucketLevelAccess. This configures access checks + to use only bucket-level IAM policies. + See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess. +- Fix userAgent to use correct version. + +## v1.1.2 + +- Fix memory leak in BucketIterator and ObjectIterator. + +## v1.1.1 + +- Send BucketPolicyOnly even when it's disabled. + +## v1.1.0 + +- Performance improvements for ObjectIterator and BucketIterator. 
+- Fix Bucket.ObjectIterator size calculation checks. +- Added HMACKeyOptions to all the methods which allows for options such as + UserProject to be set per invocation and optionally be used. + +## v1.0.0 + +This is the first tag to carve out storage as its own module. See: +https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. diff --git a/vendor/cloud.google.com/go/storage/LICENSE b/vendor/cloud.google.com/go/storage/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 07c470d3..47848264 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -232,10 +232,18 @@ type BucketAttrs struct { // ACL is the list of access control rules on the bucket. ACL []ACLRule - // BucketPolicyOnly configures access checks to use only bucket-level IAM - // policies. + // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of + // UniformBucketLevelAccess is recommended above the use of this field. + // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to + // true, will enable UniformBucketLevelAccess. BucketPolicyOnly BucketPolicyOnly + // UniformBucketLevelAccess configures access checks to use only bucket-level IAM + // policies and ignore any ACL rules for the bucket. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access + // for more information. + UniformBucketLevelAccess UniformBucketLevelAccess + // DefaultObjectACL is the list of access controls to // apply to new objects when no object ACL is provided. DefaultObjectACL []ACLRule @@ -267,14 +275,10 @@ type BucketAttrs struct { // StorageClass is the default storage class of the bucket. This defines // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "MULTI_REGIONAL", - // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which - // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on - // the bucket's location settings. - // - // "DURABLE_REDUCED_AVAILABILITY", "MULTI_REGIONAL" and "REGIONAL" - // are considered legacy storage classes. + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. StorageClass string // Created is the creation time of the bucket. @@ -327,8 +331,8 @@ type BucketAttrs struct { LocationType string } -// BucketPolicyOnly configures access checks to use only bucket-level IAM -// policies. +// BucketPolicyOnly is an alias for UniformBucketLevelAccess. +// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly. 
type BucketPolicyOnly struct { // Enabled specifies whether access checks use only bucket-level IAM // policies. Enabled may be disabled until the locked time. @@ -338,6 +342,17 @@ type BucketPolicyOnly struct { LockedTime time.Time } +// UniformBucketLevelAccess configures access checks to use only bucket-level IAM +// policies. +type UniformBucketLevelAccess struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. + Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. + LockedTime time.Time +} + // Lifecycle is the lifecycle configuration for objects in the bucket. type Lifecycle struct { Rules []LifecycleRule @@ -446,8 +461,7 @@ type LifecycleCondition struct { // MatchesStorageClasses is the condition matching the object's storage // class. // - // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", - // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". + // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". MatchesStorageClasses []string // NumNewerVersions is the condition matching objects with a number of newer versions. @@ -495,26 +509,27 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { return nil, err } return &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - DefaultEventBasedHold: b.DefaultEventBasedHold, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, - ACL: toBucketACLRules(b.Acl), - DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), - Labels: b.Labels, - RequesterPays: b.Billing != nil && b.Billing.RequesterPays, - Lifecycle: toLifecycle(b.Lifecycle), - RetentionPolicy: rp, - CORS: toCORS(b.Cors), - Encryption: toBucketEncryption(b.Encryption), - Logging: toBucketLogging(b.Logging), - Website: toBucketWebsite(b.Website), - BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), - Etag: b.Etag, - LocationType: b.LocationType, + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + DefaultEventBasedHold: b.DefaultEventBasedHold, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + ACL: toBucketACLRules(b.Acl), + DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), + Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, + Lifecycle: toLifecycle(b.Lifecycle), + RetentionPolicy: rp, + CORS: toCORS(b.Cors), + Encryption: toBucketEncryption(b.Encryption), + Logging: toBucketLogging(b.Logging), + Website: toBucketWebsite(b.Website), + BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), + UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), + Etag: b.Etag, + LocationType: b.LocationType, }, nil } @@ -540,9 +555,9 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { bb = &raw.BucketBilling{RequesterPays: true} } var bktIAM *raw.BucketIamConfiguration - if b.BucketPolicyOnly.Enabled { + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { bktIAM = &raw.BucketIamConfiguration{ - BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ Enabled: true, }, } @@ -609,10 +624,20 @@ type BucketAttrsToUpdate struct { // newly created objects in this bucket. 
DefaultEventBasedHold optional.Bool - // BucketPolicyOnly configures access checks to use only bucket-level IAM - // policies. + // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of + // UniformBucketLevelAccess is recommended above the use of this field. + // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to + // true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and + // UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess + // will take precedence. BucketPolicyOnly *BucketPolicyOnly + // UniformBucketLevelAccess configures access checks to use only bucket-level IAM + // policies and ignore any ACL rules for the bucket. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access + // for more information. + UniformBucketLevelAccess *UniformBucketLevelAccess + // If set, updates the retention policy of the bucket. Using // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. // @@ -701,8 +726,17 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.BucketPolicyOnly != nil { rb.IamConfiguration = &raw.BucketIamConfiguration{ - BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ - Enabled: ua.BucketPolicyOnly.Enabled, + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: ua.BucketPolicyOnly.Enabled, + ForceSendFields: []string{"Enabled"}, + }, + } + } + if ua.UniformBucketLevelAccess != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: ua.UniformBucketLevelAccess.Enabled, + ForceSendFields: []string{"Enabled"}, }, } } @@ -716,6 +750,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.Lifecycle != nil { rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle") } if ua.Logging != nil { if *ua.Logging == (BucketLogging{}) { @@ -902,7 +937,7 @@ func toCORS(rc []*raw.BucketCors) []CORS { func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { var rl raw.BucketLifecycle if len(l.Rules) == 0 { - return nil + rl.ForceSendFields = []string{"Rule"} } for _, r := range l.Rules { rr := &raw.BucketLifecycleRule{ @@ -952,12 +987,11 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { }, } - switch { - case rr.Condition.IsLive == nil: + if rr.Condition.IsLive == nil { r.Condition.Liveness = LiveAndArchived - case *rr.Condition.IsLive == true: + } else if *rr.Condition.IsLive { r.Condition.Liveness = Live - case *rr.Condition.IsLive == false: + } else { r.Condition.Liveness = Archived } @@ -1041,8 +1075,26 @@ func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { } } +func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { + if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { + return UniformBucketLevelAccess{} + } + lt, err := time.Parse(time.RFC3339, b.UniformBucketLevelAccess.LockedTime) + if err != nil { + return UniformBucketLevelAccess{ + Enabled: true, + } + } + return UniformBucketLevelAccess{ + Enabled: true, + LockedTime: lt, + } +} + // Objects returns an iterator over the objects in the bucket that match the Query q. // If q is nil, no filtering is done. +// +// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. 
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { it := &ObjectIterator{ ctx: ctx, @@ -1059,6 +1111,8 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { } // An ObjectIterator is an iterator over ObjectAttrs. +// +// Note: This iterator is not safe for concurrent operations without explicit synchronization. type ObjectIterator struct { ctx context.Context bucket *BucketHandle @@ -1069,6 +1123,8 @@ type ObjectIterator struct { } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if @@ -1078,6 +1134,8 @@ func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *ObjectIterator) Next() (*ObjectAttrs, error) { if err := it.nextFunc(); err != nil { return nil, err @@ -1094,6 +1152,9 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) req.Delimiter(it.query.Delimiter) req.Prefix(it.query.Prefix) req.Versions(it.query.Versions) + if len(it.query.fieldSelection) > 0 { + req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) + } req.PageToken(pageToken) if it.bucket.userProject != "" { req.UserProject(it.bucket.userProject) @@ -1126,6 +1187,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project // are returned. +// +// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { it := &BucketIterator{ ctx: ctx, @@ -1136,10 +1199,13 @@ func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator it.fetch, func() int { return len(it.buckets) }, func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it } // A BucketIterator is an iterator over BucketAttrs. +// +// Note: This iterator is not safe for concurrent operations without explicit synchronization. type BucketIterator struct { // Prefix restricts the iterator to buckets whose names begin with it. Prefix string @@ -1155,6 +1221,8 @@ type BucketIterator struct { // Next returns the next result. Its second return value is iterator.Done if // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *BucketIterator) Next() (*BucketAttrs, error) { if err := it.nextFunc(); err != nil { return nil, err @@ -1165,6 +1233,8 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) { } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. 
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 52162e72..61983df5 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -166,6 +166,13 @@ type Composer struct { // or zero-valued attributes are ignored. ObjectAttrs + // SendCRC specifies whether to transmit a CRC32C field. It should be set + // to true in addition to setting the Composer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data in the destination object does not match + // the checksum, the compose will be rejected. + SendCRC32C bool + dst *ObjectHandle srcs []*ObjectHandle } @@ -186,6 +193,9 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { // Compose requires a non-empty Destination, so we always set it, // even if the caller-provided ObjectAttrs is the zero value. req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + if c.SendCRC32C { + req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C) + } for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 88f64590..614ea11a 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -117,6 +117,33 @@ Objects also have attributes, which you can fetch with Attrs: fmt.Printf("object %s has size %d and can be read using %s\n", objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) +Listing objects + +Listing objects in a bucket is done with the Bucket.Objects method: + + query := &storage.Query{Prefix: ""} + + var names []string + it := bkt.Objects(ctx, query) + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + names = append(names, attrs.Name) + } + +If only a subset of object attributes is needed when listing, specifying this +subset using Query.SetAttrSelection may speed up the listing process: + + query := &storage.Query{Prefix: ""} + query.SetAttrSelection([]string{"Name"}) + + // ... as before + ACLs Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of @@ -164,6 +191,21 @@ SignedURL for details. } fmt.Println(url) +Post Policy V4 Signed Request + +A type of signed request that allows uploads through HTML forms directly to Cloud Storage with +temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised +by a user. + +For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well +as the documentation of GenerateSignedPostPolicyV4. + + pv4, err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + Errors Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). 
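The copy.go hunk above adds SendCRC32C so a compose request can carry an expected checksum. A minimal sketch, assuming two existing source objects and a precomputed CRC32C (Castagnoli) value; the bucket, object names, and checksum below are placeholders:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("my-bucket")
	dst := bkt.Object("composite")
	composer := dst.ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2"))

	// Send the expected CRC32C of the composed object; per the new field's
	// doc comment, the compose is rejected if the destination data does not match.
	composer.CRC32C = 0x12345678 // placeholder: CRC32C (Castagnoli) of the combined data
	composer.SendCRC32C = true

	if _, err := composer.Run(ctx); err != nil {
		log.Fatal(err)
	}
}
```

The boolean exists because zero is a valid CRC: without it the client could not distinguish "send a zero checksum" from "send no checksum at all", which is exactly the rationale the new field's comment gives.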
diff --git a/vendor/cloud.google.com/go/storage/go.mod b/vendor/cloud.google.com/go/storage/go.mod new file mode 100644 index 00000000..2eb6df3c --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go.mod @@ -0,0 +1,18 @@ +module cloud.google.com/go/storage + +go 1.11 + +require ( + cloud.google.com/go v0.57.0 + cloud.google.com/go/bigquery v1.8.0 // indirect + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.4.1 + github.com/googleapis/gax-go/v2 v2.0.5 + golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect + golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 // indirect + google.golang.org/api v0.28.0 + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 + google.golang.org/grpc v1.29.1 +) diff --git a/vendor/cloud.google.com/go/storage/go.sum b/vendor/cloud.google.com/go/storage/go.sum new file mode 100644 index 00000000..5d3fca5f --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go.sum @@ -0,0 +1,450 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0 h1:K2NyuHRuv15ku6eUpe0DQk5ZykPMnSOnvuVf6IHcjaE= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= 
+cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a h1:7Wlg8L54In96HTWOaI4sreLJ6qfyGuvSau5el3fK41Y= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a 
h1:7YaEqUc1tUg0yDwvdX+3U5bwrBg7u3FFAZ5D8gUs4/c= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74 h1:KW20qMcLRWuIgjdCpHFJbVZA7zsDKtFXPNcm7/eI5ZA= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nSRS/JrZEig0gz2BY4VNrg= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d h1:7M9AXzLrJWWGdDYtBblPHBTnHtaN6KKQ98OYb35mLlY= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d h1:3K34ovZAOnVaUPxanr0j4ghTZTPTA0CnXvjCl+5lZqk= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5XLHpzXXqcCdZPP/ApQ5NY= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 h1:FD4wDsP+CQUqh2V12OBOt90pLHVToe58P++fUu3ggV4= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api 
v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0 h1:GwFK8+l5/gdsOYKz5p6M4UK+QT8OvmHWZPJCnf+5DjA= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba 
h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90 h1:7THRSvPuzF1bql5kyFzX0JM0vpGhwuhskgJrJsbZ80Y= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383 h1:Vo0fD5w0fUKriWlZLyrim2GXbumyN0D6euW79T9PgEE= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672 h1:jiDSspVssiikoRPFHT6pYrL+CL6/yIc3b9AuHO/4xik= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 h1:FGjyjrQGURdc98leD1P65IdQD9Zlr4McvRcqIlV6OSs= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 
h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= 
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go index 206813f0..c1273d59 100644 --- a/vendor/cloud.google.com/go/storage/go110.go +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -16,7 +16,12 @@ package storage -import "google.golang.org/api/googleapi" +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) func shouldRetry(err error) bool { switch e := err.(type) { @@ -24,6 +29,17 @@ func shouldRetry(err error) bool { // Retry on 429 and 5xx, according to // https://cloud.google.com/storage/docs/exponential-backoff. return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + return false case interface{ Temporary() bool }: return e.Temporary() default: diff --git a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go new file mode 100644 index 00000000..7df7a1d7 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go @@ -0,0 +1,22 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the cloud.google.com/go import, won't actually become part of +// the resultant binary. +// +build modhack + +package storage + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 4a5c1b51..7d8185f3 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -25,6 +25,8 @@ import ( ) // HMACState is the state of the HMAC key. +// +// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACState string const ( @@ -105,9 +107,21 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // Get invokes an RPC to retrieve the HMAC key referenced by the // HMACKeyHandle's accessID. // +// Options such as UserProjectForHMACKeys can be used to set the +// userProject to be billed against for operations. +// // This method is EXPERIMENTAL and subject to change or removal without notice. 
-func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) { +func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { call := hkh.raw.Get(hkh.projectID, hkh.accessID) + + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + call = call.UserProject(desc.userProjectID) + } + setClientHeader(call.Header()) var metadata *raw.HmacKeyMetadata @@ -131,8 +145,15 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) { // After deletion, a key cannot be used to authenticate requests. // // This method is EXPERIMENTAL and subject to change or removal without notice. -func (hkh *HMACKeyHandle) Delete(ctx context.Context) error { +func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID) + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + delCall = delCall.UserProject(desc.userProjectID) + } setClientHeader(delCall.Header()) return runWithRetry(ctx, func() error { @@ -173,7 +194,7 @@ func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, er // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. // // This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string) (*HMACKey, error) { +func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { if projectID == "" { return nil, errors.New("storage: expecting a non-blank projectID") } @@ -183,6 +204,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma svc := raw.NewProjectsHmacKeysService(c.raw) call := svc.Create(projectID, serviceAccountEmail) + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + call = call.UserProject(desc.userProjectID) + } + setClientHeader(call.Header()) var hkPb *raw.HmacKey @@ -212,7 +241,7 @@ type HMACKeyAttrsToUpdate struct { // Update mutates the HMACKey referred to by accessID. // // This method is EXPERIMENTAL and subject to change or removal without notice. -func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*HMACKey, error) { +func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { if au.State != Active && au.State != Inactive { return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) } @@ -221,6 +250,14 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*H Etag: au.Etag, State: string(au.State), }) + + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + call = call.UserProject(desc.userProjectID) + } setClientHeader(call.Header()) var metadata *raw.HmacKeyMetadata @@ -241,6 +278,8 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*H // An HMACKeysIterator is an iterator over HMACKeys. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This type is EXPERIMENTAL and subject to change or removal without notice. 
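The hmac.go hunks above thread variadic HMACKeyOption values through Get, Delete, CreateHMACKey, and Update. A hedged sketch of a typical key lifecycle against the vendored API (the project ID and access ID are placeholders); note that Cloud Storage requires a key to be inactive before it can be deleted:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholders: project ID and HMAC key access ID.
	hkh := client.HMACKeyHandle("my-project", "GOOG1EXAMPLEACCESSID")

	key, err := hkh.Get(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Deactivate an active key, then delete it.
	if key.State == storage.Active {
		if _, err := hkh.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
			log.Fatal(err)
		}
	}
	if err := hkh.Delete(ctx); err != nil {
		log.Fatal(err)
	}
}
```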
type HMACKeysIterator struct { ctx context.Context @@ -250,18 +289,25 @@ type HMACKeysIterator struct { pageInfo *iterator.PageInfo nextFunc func() error index int + desc hmacKeyDesc } // ListHMACKeys returns an iterator for listing HMACKeys. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIterator { +func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { it := &HMACKeysIterator{ ctx: ctx, raw: raw.NewProjectsHmacKeysService(c.raw), projectID: projectID, } + for _, opt := range opts { + opt.withHMACKeyDesc(&it.desc) + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.hmacKeys) - it.index }, @@ -278,6 +324,8 @@ func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIt // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This method is EXPERIMENTAL and subject to change or removal without notice. func (it *HMACKeysIterator) Next() (*HMACKey, error) { if err := it.nextFunc(); err != nil { @@ -292,16 +340,26 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This method is EXPERIMENTAL and subject to change or removal without notice. func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { call := it.raw.List(it.projectID) setClientHeader(call.Header()) - call = call.PageToken(pageToken) - // By default we'll also show deleted keys and then - // let users filter on their own. - call = call.ShowDeletedKeys(true) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if it.desc.showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if it.desc.userProjectID != "" { + call = call.UserProject(it.desc.userProjectID) + } + if it.desc.forServiceAccountEmail != "" { + call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail) + } if pageSize > 0 { call = call.MaxResults(int64(pageSize)) } @@ -328,3 +386,56 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, } return resp.NextPageToken, nil } + +type hmacKeyDesc struct { + forServiceAccountEmail string + showDeletedKeys bool + userProjectID string +} + +// HMACKeyOption configures the behavior of HMACKey related methods and actions. +// +// This interface is EXPERIMENTAL and subject to change or removal without notice. +type HMACKeyOption interface { + withHMACKeyDesc(*hmacKeyDesc) +} + +type hmacKeyDescFunc func(*hmacKeyDesc) + +func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { + hkdf(hkd) +} + +// ForHMACKeyServiceAccountEmail returns HMAC Keys that are +// associated with the email address of a service account in the project. +// +// Only one service account email can be used as a filter, so if multiple +// of these options are applied, the last email to be set will be used. +// +// This option is EXPERIMENTAL and subject to change or removal without notice. 
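ListHMACKeys accepts the same options, and the updated fetch only forwards ShowDeletedKeys, UserProject, and ServiceAccountEmail when the corresponding option was supplied (previously deleted keys were always requested). A small sketch of listing keys for one service account, including deleted ones; the project ID and service-account email are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	it := client.ListHMACKeys(ctx, "my-project",
		storage.ForHMACKeyServiceAccountEmail("svc@my-project.iam.gserviceaccount.com"),
		storage.ShowDeletedHMACKeys(),
	)
	for {
		key, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(key.AccessID, key.State)
	}
}
```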
+func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.forServiceAccountEmail = serviceAccountEmail + }) +} + +// ShowDeletedHMACKeys will also list keys whose state is "DELETED". +// +// This option is EXPERIMENTAL and subject to change or removal without notice. +func ShowDeletedHMACKeys() HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.showDeletedKeys = true + }) +} + +// UserProjectForHMACKeys will bill the request against userProjectID +// if userProjectID is non-empty. +// +// Note: This is a noop right now and only provided for API compatibility. +// +// This option is EXPERIMENTAL and subject to change or removal without notice. +func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.userProjectID = userProjectID + }) +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 9d936067..5caefb05 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -21,6 +21,7 @@ import ( "cloud.google.com/go/internal/trace" raw "google.golang.org/api/storage/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/genproto/googleapis/type/expr" ) // IAM provides access to IAM access control for the bucket. @@ -38,10 +39,14 @@ type iamClient struct { } func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + return c.GetWithVersion(ctx, resource, 1) +} + +func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.GetIamPolicy(resource) + call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion)) setClientHeader(call.Header()) if c.userProject != "" { call.UserProject(c.userProject) @@ -97,6 +102,7 @@ func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { return &raw.Policy{ Bindings: iamToStorageBindings(ip.Bindings), Etag: string(ip.Etag), + Version: int64(ip.Version), } } @@ -104,13 +110,26 @@ func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { var rbs []*raw.PolicyBindings for _, ib := range ibs { rbs = append(rbs, &raw.PolicyBindings{ - Role: ib.Role, - Members: ib.Members, + Role: ib.Role, + Members: ib.Members, + Condition: iamToStorageCondition(ib.Condition), }) } return rbs } +func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr { + if exprpb == nil { + return nil + } + return &raw.Expr{ + Expression: exprpb.Expression, + Description: exprpb.Description, + Location: exprpb.Location, + Title: exprpb.Title, + } +} + func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { return &iampb.Policy{ Bindings: iamFromStorageBindings(rp.Bindings), @@ -122,9 +141,22 @@ func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { var ibs []*iampb.Binding for _, rb := range rbs { ibs = append(ibs, &iampb.Binding{ - Role: rb.Role, - Members: rb.Members, + Role: rb.Role, + Members: rb.Members, + Condition: iamFromStorageCondition(rb.Condition), }) } return ibs } + +func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr { + if rawexpr == nil { + return nil + } + return &expr.Expr{ + Expression: rawexpr.Expression, + Description: rawexpr.Description, + Location: rawexpr.Location, + Title: 
rawexpr.Title, + } +} diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go new file mode 100644 index 00000000..b9df7db9 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -0,0 +1,377 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// PostPolicyV4Options are used to construct a signed post policy. +// Please see https://cloud.google.com/storage/docs/xml-api/post-object +// for reference about the fields. +type PostPolicyV4Options struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func(hashBytes []byte) (signature []byte, err error) + + // Expires is the expiration time on the signed URL. + // It must be a time in the future. + // Required. + Expires time.Time + + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Optional. + Style URLStyle + + // Insecure when set indicates that the generated URL's scheme + // will use "http" instead of "https" (default). + // Optional. + Insecure bool + + // Fields specifies the attributes of a PostPolicyV4 request. 
+ // When Fields is non-nil, its attributes must match those that will + // be passed into field Conditions. + // Optional. + Fields *PolicyV4Fields + + // The conditions that the uploaded file will be expected to conform to. + // When used, the failure of an upload to satisfy a condition will result in + // a 4XX status code sent back with a message describing the problem. + // Optional. + Conditions []PostPolicyV4Condition +} + +// PolicyV4Fields describes the attributes for a PostPolicyV4 request. +type PolicyV4Fields struct { + // ACL specifies the access control permissions for the object. + // Optional. + ACL string + // CacheControl specifies the caching directives for the object. + // Optional. + CacheControl string + // ContentType specifies the media type of the object. + // Optional. + ContentType string + // ContentDisposition specifies how the file will be served back to requesters. + // Optional. + ContentDisposition string + // ContentEncoding specifies the decompressive transcoding of the object. + // This field is complementary to ContentType in that the file could be + // compressed but ContentType specifies the file's original media type. + // Optional. + ContentEncoding string + // Metadata specifies custom metadata for the object. + // If any key doesn't begin with "x-goog-meta-", an error will be returned. + // Optional. + Metadata map[string]string + // StatusCodeOnSuccess when set, specifies the status code that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + StatusCodeOnSuccess int + // RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + RedirectToURLOnSuccess string +} + +// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request. +type PostPolicyV4 struct { + // URL is the generated URL that the file upload will be made to. + URL string + // Fields specifies the generated key-values that the file uploader + // must include in their multipart upload form. + Fields map[string]string +} + +// PostPolicyV4Condition describes the constraints that the subsequent +// object upload's multipart form fields will be expected to conform to. +type PostPolicyV4Condition interface { + isEmpty() bool + json.Marshaler +} + +type startsWith struct { + key, value string +} + +func (sw *startsWith) MarshalJSON() ([]byte, error) { + return json.Marshal([]string{"starts-with", sw.key, sw.value}) +} +func (sw *startsWith) isEmpty() bool { + return sw.value == "" +} + +// ConditionStartsWith checks that an attribute starts with value. +// An empty value will cause this condition to be ignored. +func ConditionStartsWith(key, value string) PostPolicyV4Condition { + return &startsWith{key, value} +} + +type contentLengthRangeCondition struct { + start, end uint64 +} + +func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end}) +} +func (clr *contentLengthRangeCondition) isEmpty() bool { + return clr.start == 0 && clr.end == 0 +} + +type singleValueCondition struct { + name, value string +} + +func (svc *singleValueCondition) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{svc.name: svc.value}) +} +func (svc *singleValueCondition) isEmpty() bool { + return svc.value == "" +} + +// ConditionContentLengthRange constrains the limits that the +// multipart upload's range header will be expected to be within.
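// Illustrative usage sketch (assumed example, not part of the vendored source): generating a
// signed POST policy with the option and condition types declared in this file. The bucket,
// object, service account, and key file names are placeholder values.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	pemBytes, err := ioutil.ReadFile("service-account.pem") // placeholder PEM key
	if err != nil {
		log.Fatal(err)
	}
	policy, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/report.csv", &storage.PostPolicyV4Options{
		GoogleAccessID: "svc@my-project.iam.gserviceaccount.com",
		PrivateKey:     pemBytes,
		Expires:        time.Now().Add(15 * time.Minute),
		Fields: &storage.PolicyV4Fields{
			ContentType:         "text/csv",
			StatusCodeOnSuccess: 200,
		},
		Conditions: []storage.PostPolicyV4Condition{
			// Limit uploads made with this policy to at most 10 MiB.
			storage.ConditionContentLengthRange(0, 10<<20),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// policy.URL is the form action; policy.Fields become the hidden form fields.
	fmt.Println(policy.URL, policy.Fields)
}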
+func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { + return &contentLengthRangeCondition{start, end} +} + +func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { + return &singleValueCondition{"success_action_redirect", redirectURL} +} + +func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { + svc := &singleValueCondition{name: "success_action_status"} + if statusCode > 0 { + svc.value = fmt.Sprintf("%d", statusCode) + } + return svc +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. +// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + if bucket == "" { + return nil, errors.New("storage: bucket must be non-empty") + } + if object == "" { + return nil, errors.New("storage: object must be non-empty") + } + now := utcNow() + if err := validatePostPolicyV4Options(opts, now); err != nil { + return nil, err + } + + var signingFn func(hashedBytes []byte) ([]byte, error) + switch { + case opts.SignBytes != nil: + signingFn = opts.SignBytes + + case len(opts.PrivateKey) != 0: + parsedRSAPrivKey, err := parseKey(opts.PrivateKey) + if err != nil { + return nil, err + } + signingFn = func(hashedBytes []byte) ([]byte, error) { + return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes) + } + + default: + return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + + var descFields PolicyV4Fields + if opts.Fields != nil { + descFields = *opts.Fields + } + + if err := validateMetadata(descFields.Metadata); err != nil { + return nil, err + } + + // Build the policy. + conds := make([]PostPolicyV4Condition, len(opts.Conditions)) + copy(conds, opts.Conditions) + conds = append(conds, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + &singleValueCondition{"acl", descFields.ACL}, + &singleValueCondition{"cache-control", descFields.CacheControl}, + ) + + YYYYMMDD := now.Format(yearMonthDay) + policyFields := map[string]string{ + "key": object, + "x-goog-date": now.Format(iso8601), + "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + "x-goog-algorithm": "GOOG4-RSA-SHA256", + "success_action_redirect": descFields.RedirectToURLOnSuccess, + "acl": descFields.ACL, + } + for key, value := range descFields.Metadata { + conds = append(conds, &singleValueCondition{key, value}) + policyFields[key] = value + } + + // Following from the order expected by the conformance test cases, + // hence manually inserting these fields in a specific order. 
+ conds = append(conds, + &singleValueCondition{"bucket", bucket}, + &singleValueCondition{"key", object}, + &singleValueCondition{"x-goog-date", now.Format(iso8601)}, + &singleValueCondition{ + name: "x-goog-credential", + value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + }, + &singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"}, + ) + + nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions)) + for _, cond := range conds { + if cond == nil || !cond.isEmpty() { + nonEmptyConds = append(nonEmptyConds, cond) + } + } + condsAsJSON, err := json.Marshal(map[string]interface{}{ + "conditions": nonEmptyConds, + "expiration": opts.Expires.Format(time.RFC3339), + }) + if err != nil { + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err) + } + + b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) + shaSum := sha256.Sum256([]byte(b64Policy)) + signature, err := signingFn(shaSum[:]) + if err != nil { + return nil, err + } + + policyFields["policy"] = b64Policy + policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature) + + // Construct the URL. + scheme := "https" + if opts.Insecure { + scheme = "http" + } + path := opts.Style.path(bucket, "") + "/" + u := &url.URL{ + Path: path, + RawPath: pathEncodeV4(path), + Host: opts.Style.host(bucket), + Scheme: scheme, + } + + if descFields.StatusCodeOnSuccess > 0 { + policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess) + } + + // Clear out fields with blanks values. + for key, value := range policyFields { + if value == "" { + delete(policyFields, key) + } + } + pp4 := &PostPolicyV4{ + Fields: policyFields, + URL: u.String(), + } + return pp4, nil +} + +// validatePostPolicyV4Options checks that: +// * GoogleAccessID is set +// * either but not both PrivateKey and SignBytes are set or nil, but not both +// * Expires, the deadline is not in the past +// * if Style is not set, it'll use PathStyle +func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { + if opts == nil || opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank { + return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Expires.Before(now) { + return errors.New("storage: expecting Expires to be in the future") + } + if opts.Style == nil { + opts.Style = PathStyle() + } + return nil +} + +// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-", +// otherwise it will return an error. +func validateMetadata(hdrs map[string]string) (err error) { + if len(hdrs) == 0 { + return nil + } + + badKeys := make([]string, 0, len(hdrs)) + for key := range hdrs { + if !strings.HasPrefix(key, "x-goog-meta-") { + badKeys = append(badKeys, key) + } + } + if len(badKeys) != 0 { + err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) + } + return +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 5c83651b..d64f5ec7 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -86,6 +86,11 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // until the end. 
If offset is negative, the object is read abs(offset) bytes // from the end, and length must also be negative to indicate all remaining // bytes will be read. +// +// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies +// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding +// that file will be served back whole, regardless of the requested range as +// Google Cloud Storage dictates. func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() @@ -160,10 +165,25 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) Body: string(body), } } - if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { res.Body.Close() return errors.New("storage: partial request not satisfied") } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + // If a generation hasn't been specified, and this is the first response we get, let's record the // generation. In future requests we'll use this generation as a precondition to avoid data races. if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { @@ -232,7 +252,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) body = emptyBody } var metaGen int64 - if res.Header.Get("X-Goog-Generation") != "" { + if res.Header.Get("X-Goog-Metageneration") != "" { metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) if err != nil { return nil, err @@ -268,6 +288,18 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) }, nil } +// decompressiveTranscoding returns true if the request was served decompressed +// and different than its original storage form. This happens when the "Content-Encoding" +// header is "gzip". +// See: +// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip +// * https://github.com/googleapis/google-cloud-go/issues/1800 +func decompressiveTranscoding(res *http.Response) bool { + // Decompressive Transcoding. + return res.Header.Get("Content-Encoding") == "gzip" || + res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" +} + func uncompressedByServer(res *http.Response) bool { // If the data is stored as gzip but is not encoded as gzip, then it // was uncompressed by the server. diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index d35bd756..20d9518a 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -47,14 +47,19 @@ import ( htransport "google.golang.org/api/transport/http" ) +// Methods which can be used in signed URLs. +var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true} + var ( // ErrBucketNotExist indicates that the bucket does not exist. 
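// Illustrative usage sketch (assumed example, not part of the vendored source): a ranged read
// subject to the decompressive-transcoding caveat documented in reader.go above. For objects
// stored with Content-Encoding: gzip the service returns the whole object, so the requested
// range is best-effort. Bucket and object names are placeholders.
package example

import (
	"context"
	"io/ioutil"

	"cloud.google.com/go/storage"
)

func readSlice(ctx context.Context, client *storage.Client) ([]byte, error) {
	// Ask for 4 KiB starting at offset 1024.
	r, err := client.Bucket("my-bucket").Object("logs/app.log").NewRangeReader(ctx, 1024, 4096)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}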
ErrBucketNotExist = errors.New("storage: bucket doesn't exist") // ErrObjectNotExist indicates that the object does not exist. ErrObjectNotExist = errors.New("storage: object doesn't exist") + // errMethodNotValid indicates that given HTTP method is not valid. + errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) ) -const userAgent = "gcloud-golang-storage/20151204" +var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo) const ( // ScopeFullControl grants permissions to manage your @@ -94,30 +99,44 @@ type Client struct { // NewClient creates a new Google Cloud Storage client. // The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - o := []option.ClientOption{ - option.WithScopes(ScopeFullControl), - option.WithUserAgent(userAgent), + var host, readHost, scheme string + + if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { + scheme = "https" + readHost = "storage.googleapis.com" + + // Prepend default options to avoid overriding options passed by the user. + opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...) + } else { + scheme = "http" + readHost = host + + opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) } - opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } - rawService, err := raw.New(hc) + rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc)) if err != nil { return nil, fmt.Errorf("storage client: %v", err) } - if ep != "" { - rawService.BasePath = ep - } - scheme := "https" - var host, readHost string - if host = os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { - scheme = "http" - readHost = host + if ep == "" { + // Override the default value for BasePath from the raw client. + // TODO: remove when the raw client uses this endpoint as its default (~end of 2020) + rawService.BasePath = "https://storage.googleapis.com/storage/v1/" } else { - readHost = "storage.googleapis.com" + // If the endpoint has been set explicitly, use this for the BasePath + // as well as readHost + rawService.BasePath = ep + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err) + } + readHost = u.Host } + return &Client{ hc: hc, raw: rawService, @@ -151,6 +170,80 @@ const ( SigningSchemeV4 ) +// URLStyle determines the style to use for the signed URL. pathStyle is the +// default. All non-default options work with V4 scheme only. See +// https://cloud.google.com/storage/docs/request-endpoints for details. +type URLStyle interface { + // host should return the host portion of the signed URL, not including + // the scheme (e.g. storage.googleapis.com). + host(bucket string) string + + // path should return the path portion of the signed URL, which may include + // both the bucket and object name or only the object name depending on the + // style. 
+ path(bucket, object string) string +} + +type pathStyle struct{} + +type virtualHostedStyle struct{} + +type bucketBoundHostname struct { + hostname string +} + +func (s pathStyle) host(bucket string) string { + return "storage.googleapis.com" +} + +func (s virtualHostedStyle) host(bucket string) string { + return bucket + ".storage.googleapis.com" +} + +func (s bucketBoundHostname) host(bucket string) string { + return s.hostname +} + +func (s pathStyle) path(bucket, object string) string { + p := bucket + if object != "" { + p += "/" + object + } + return p +} + +func (s virtualHostedStyle) path(bucket, object string) string { + return object +} + +func (s bucketBoundHostname) path(bucket, object string) string { + return object +} + +// PathStyle is the default style, and will generate a URL of the form +// "storage.googleapis.com/<bucket-name>/<object-name>". +func PathStyle() URLStyle { + return pathStyle{} +} + +// VirtualHostedStyle generates a URL relative to the bucket's virtual +// hostname, e.g. "<bucket-name>.storage.googleapis.com/<object-name>". +func VirtualHostedStyle() URLStyle { + return virtualHostedStyle{} +} + +// BucketBoundHostname generates a URL with a custom hostname tied to a +// specific GCS bucket. The desired hostname should be passed in using the +// hostname argument. Generated urls will be of the form +// "<bucket-bound-hostname>/<object-name>". See +// https://cloud.google.com/storage/docs/request-endpoints#cname and +// https://cloud.google.com/load-balancing/docs/https/adding-backend-buckets-to-load-balancers +// for details. Note that for CNAMEs, only HTTP is supported, so Insecure must +// be set to true. +func BucketBoundHostname(hostname string) URLStyle { + return bucketBoundHostname{hostname: hostname} +} + // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. @@ -207,16 +300,37 @@ type SignedURLOptions struct { ContentType string // Headers is a list of extension headers the client must provide - // in order to use the generated signed URL. + // in order to use the generated signed URL. Each must be a string of the + // form "key:values", with multiple values separated by a semicolon. // Optional. Headers []string + // QueryParameters is a map of additional query parameters. When + // SigningScheme is V4, this is used in computing the signature, and the + // client must use the same query parameters when using the generated signed + // URL. + // Optional. + QueryParameters url.Values + // MD5 is the base64 encoded MD5 checksum of the file. // If provided, the client should provide the exact value on the request // header in order to use the signed URL. // Optional. MD5 string + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Only supported for V4 signing. + // Optional. + Style URLStyle + + // Insecure determines whether the signed URL should use HTTPS (default) or + // HTTP. + // Only supported for V4 signing. + // Optional. + Insecure bool + // Scheme determines the version of URL signing to use. Default is // SigningSchemeV2.
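// Illustrative usage sketch (assumed example, not part of the vendored source): a V4-signed
// URL using the Style and QueryParameters options described above. The credentials, bucket,
// and object names are placeholder values.
package example

import (
	"net/url"
	"time"

	"cloud.google.com/go/storage"
)

func signedDownloadURL(pemKey []byte) (string, error) {
	return storage.SignedURL("my-bucket", "reports/2020.csv", &storage.SignedURLOptions{
		GoogleAccessID: "svc@my-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Method:         "GET",
		Expires:        time.Now().Add(10 * time.Minute),
		Scheme:         storage.SigningSchemeV4,
		// Serve from the bucket's virtual hostname rather than the path-style endpoint.
		Style: storage.VirtualHostedStyle(),
		QueryParameters: url.Values{
			"response-content-disposition": {"attachment; filename=report.csv"},
		},
	})
}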
Scheme SigningScheme @@ -368,8 +482,9 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error { if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") } - if opts.Method == "" { - return errors.New("storage: missing required method option") + opts.Method = strings.ToUpper(opts.Method) + if _, ok := signedURLMethods[opts.Method]; !ok { + return errMethodNotValid } if opts.Expires.IsZero() { return errors.New("storage: missing required expires option") @@ -380,6 +495,12 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error { return errors.New("storage: invalid MD5 checksum") } } + if opts.Style == nil { + opts.Style = PathStyle() + } + if _, ok := opts.Style.(pathStyle); !ok && opts.Scheme == SigningSchemeV2 { + return errors.New("storage: only path-style URLs are permitted with SigningSchemeV2") + } if opts.Scheme == SigningSchemeV4 { cutoff := now.Add(604801 * time.Second) // 7 days + 1 second if !opts.Expires.Before(cutoff) { @@ -411,19 +532,33 @@ func extractHeaderNames(kvs []string) []string { return res } +// pathEncodeV4 creates an encoded string that matches the v4 signature spec. +// Following the spec precisely is necessary in order to ensure that the URL +// and signing string are correctly formed, and Go's url.PathEncode and +// url.QueryEncode don't generate an exact match without some additional logic. +func pathEncodeV4(path string) string { + segments := strings.Split(path, "/") + var encodedSegments []string + for _, s := range segments { + encodedSegments = append(encodedSegments, url.QueryEscape(s)) + } + encodedStr := strings.Join(encodedSegments, "/") + encodedStr = strings.Replace(encodedStr, "+", "%20", -1) + return encodedStr +} + // signedURLV4 creates a signed URL using the sigV4 algorithm. func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { buf := &bytes.Buffer{} fmt.Fprintf(buf, "%s\n", opts.Method) - u := &url.URL{Path: bucket} - if name != "" { - u.Path += "/" + name - } + + u := &url.URL{Path: opts.Style.path(bucket, name)} + u.RawPath = pathEncodeV4(u.Path) // Note: we have to add a / here because GCS does so auto-magically, despite - // Go's EscapedPath not doing so (and we have to exactly match their + // our encoding not doing so (and we have to exactly match their // canonical query). - fmt.Fprintf(buf, "/%s\n", u.EscapedPath()) + fmt.Fprintf(buf, "/%s\n", u.RawPath) headerNames := append(extractHeaderNames(opts.Headers), "host") if opts.ContentType != "" { @@ -443,23 +578,55 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, "X-Goog-SignedHeaders": {signedHeaders}, } + // Add user-supplied query parameters to the canonical query string. For V4, + // it's necessary to include these. + for k, v := range opts.QueryParameters { + canonicalQueryString[k] = append(canonicalQueryString[k], v...) + } + fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) - u.Host = "storage.googleapis.com" + // Fill in the hostname based on the desired URL style. + u.Host = opts.Style.host(bucket) + + // Fill in the URL scheme. + if opts.Insecure { + u.Scheme = "http" + } else { + u.Scheme = "https" + } var headersWithValue []string headersWithValue = append(headersWithValue, "host:"+u.Host) headersWithValue = append(headersWithValue, opts.Headers...) 
if opts.ContentType != "" { - headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType)) + headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) } if opts.MD5 != "" { - headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5)) + headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5) + } + // Trim extra whitespace from headers and replace with a single space. + var trimmedHeaders []string + for _, h := range headersWithValue { + trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " ")) } - canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n") + canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n") fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) fmt.Fprintf(buf, "%s\n", signedHeaders) - fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + + // If the user provides a value for X-Goog-Content-SHA256, we must use + // that value in the request string. If not, we use UNSIGNED-PAYLOAD. + sha256Header := false + for _, h := range trimmedHeaders { + if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") { + sha256Header = true + fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1]) + break + } + } + if !sha256Header { + fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + } sum := sha256.Sum256(buf.Bytes()) hexDigest := hex.EncodeToString(sum[:]) @@ -491,7 +658,6 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st } signature := hex.EncodeToString(b) canonicalQueryString.Set("X-Goog-Signature", string(signature)) - u.Scheme = "https" u.RawQuery = canonicalQueryString.Encode() return u.String(), nil } @@ -964,11 +1130,11 @@ type ObjectAttrs struct { // data is rejected if its MD5 hash does not match this field. MD5 []byte - // CRC32C is the CRC32 checksum of the object's content using - // the Castagnoli93 polynomial. This field is read-only, except when - // used from a Writer. If set on a Writer and Writer.SendCRC32C - // is true, the uploaded data is rejected if its CRC32c hash does not - // match this field. + // CRC32C is the CRC32 checksum of the object's content using the Castagnoli93 + // polynomial. This field is read-only, except when used from a Writer or + // Composer. In those cases, if the SendCRC32C field in the Writer or Composer + // is set to is true, the uploaded data is rejected if its CRC32C hash does + // not match this field. CRC32C uint32 // MediaLink is an URL to the object's content. This field is read-only. @@ -989,13 +1155,12 @@ type ObjectAttrs struct { // of a particular object. This field is read-only. Metageneration int64 - // StorageClass is the storage class of the object. - // This value defines how objects in the bucket are stored and - // determines the SLA and the cost of storage. Typical values are - // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" - // and "DURABLE_REDUCED_AVAILABILITY". - // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" - // or "REGIONAL" depending on the bucket's location settings. + // StorageClass is the storage class of the object. This defines + // how objects are stored and determines the SLA and the cost of storage. + // Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". + // Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. StorageClass string // Created is the time the object was created. This field is read-only. 
@@ -1127,6 +1292,78 @@ type Query struct { // Versions indicates whether multiple versions of the same // object will be included in the results. Versions bool + + // fieldSelection is used to select only specific fields to be returned by + // the query. It's used internally and is populated for the user by + // calling Query.SetAttrSelection + fieldSelection string +} + +// attrToFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the API call. Only the ObjectAttrs field names are visible to users +// because they are already part of the public API of the package. +var attrToFieldMap = map[string]string{ + "Bucket": "bucket", + "Name": "name", + "ContentType": "contentType", + "ContentLanguage": "contentLanguage", + "CacheControl": "cacheControl", + "EventBasedHold": "eventBasedHold", + "TemporaryHold": "temporaryHold", + "RetentionExpirationTime": "retentionExpirationTime", + "ACL": "acl", + "Owner": "owner", + "ContentEncoding": "contentEncoding", + "ContentDisposition": "contentDisposition", + "Size": "size", + "MD5": "md5Hash", + "CRC32C": "crc32c", + "MediaLink": "mediaLink", + "Metadata": "metadata", + "Generation": "generation", + "Metageneration": "metageneration", + "StorageClass": "storageClass", + "CustomerKeySHA256": "customerEncryption", + "KMSKeyName": "kmsKeyName", + "Created": "timeCreated", + "Deleted": "timeDeleted", + "Updated": "updated", + "Etag": "etag", +} + +// SetAttrSelection makes the query populate only specific attributes of +// objects. When iterating over objects, if you only need each object's name +// and size, pass []string{"Name", "Size"} to this method. Only these fields +// will be fetched for each object across the network; the other fields of +// ObjectAttr will remain at their default values. This is a performance +// optimization; for more information, see +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance +func (q *Query) SetAttrSelection(attrs []string) error { + fieldSet := make(map[string]bool) + + for _, attr := range attrs { + field, ok := attrToFieldMap[attr] + if !ok { + return fmt.Errorf("storage: attr %v is not valid", attr) + } + fieldSet[field] = true + } + + if len(fieldSet) > 0 { + var b bytes.Buffer + b.WriteString("items(") + first := true + for field := range fieldSet { + if !first { + b.WriteString(",") + } + first = false + b.WriteString(field) + } + b.WriteString(")") + q.fieldSelection = b.String() + } + return nil } // Conditions constrain methods to act on specific generations of diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index a1165921..1843a814 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -45,12 +45,20 @@ type Writer struct { // Writer will attempt to send to the server in a single request. Objects // smaller than the size will be sent in a single request, while larger // objects will be split over multiple requests. The size will be rounded up - // to the nearest multiple of 256K. If zero, chunking will be disabled and - // the object will be uploaded in a single request. + // to the nearest multiple of 256K. // - // ChunkSize will default to a reasonable value. If you perform many concurrent - // writes of small objects, you may wish set ChunkSize to a value that matches - // your objects' sizes to avoid consuming large amounts of memory. + // ChunkSize will default to a reasonable value. 
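// Illustrative usage sketch (assumed example, not part of the vendored source): using the new
// Query.SetAttrSelection from storage.go above to fetch only each object's name and size while
// listing. The bucket name and prefix are placeholder values.
package example

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func listNamesAndSizes(ctx context.Context, client *storage.Client) error {
	q := &storage.Query{Prefix: "backups/"}
	if err := q.SetAttrSelection([]string{"Name", "Size"}); err != nil {
		return err
	}
	it := client.Bucket("my-bucket").Objects(ctx, q)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("%s: %d bytes", attrs.Name, attrs.Size)
	}
}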
If you perform many + // concurrent writes of small objects (under ~8MB), you may wish set ChunkSize + // to a value that matches your objects' sizes to avoid consuming large + // amounts of memory. See + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size + // for more information about performance trade-offs related to ChunkSize. + // + // If ChunkSize is set to zero, chunking will be disabled and the object will + // be uploaded in a single request without the use of a buffer. This will + // further reduce memory used during uploads, but will also prevent the writer + // from retrying in case of a transient error from the server, since a buffer + // is required in order to retry the failed request. // // ChunkSize must be set before the first Write call. ChunkSize int @@ -123,7 +131,8 @@ func (w *Writer) open() error { call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). Media(pr, mediaOpts...). Projection("full"). - Context(w.ctx) + Context(w.ctx). + Name(w.o.object) if w.ProgressFunc != nil { call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) @@ -149,14 +158,10 @@ func (w *Writer) open() error { } setClientHeader(call.Header()) - // The internals that perform call.Do automatically retry - // uploading chunks, hence no need to add retries here. - // See issue https://github.com/googleapis/google-cloud-go/issues/1507. - // - // However, since this whole call's internals involve making the initial - // resumable upload session, the first HTTP request is not retried. - // TODO: Follow-up with google.golang.org/gensupport to solve - // https://github.com/googleapis/google-api-go-client/issues/392. + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. resp, err = call.Do() } if err != nil { @@ -177,6 +182,9 @@ func (w *Writer) open() error { // error even though the write failed (or will fail). Always // use the error returned from Writer.Close to determine if // the upload was successful. +// +// Writes will be retried on transient errors from the server, unless +// Writer.ChunkSize has been set to zero. func (w *Writer) Write(p []byte) (n int, err error) { w.mu.Lock() werr := w.err diff --git a/vendor/cloud.google.com/go/tools.go b/vendor/cloud.google.com/go/tools.go new file mode 100644 index 00000000..da5ca585 --- /dev/null +++ b/vendor/cloud.google.com/go/tools.go @@ -0,0 +1,31 @@ +// +build tools + +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package exists to cause `go mod` and `go get` to believe these tools +// are dependencies, even though they are not runtime dependencies of any +// package (these are tools used by our CI builds). This means they will appear +// in our `go.mod` file, but will not be a part of the build. 
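// Illustrative usage sketch (assumed example, not part of the vendored source): disabling the
// upload buffer as described in the Writer.ChunkSize documentation above. With ChunkSize set to
// zero the object is uploaded in a single request, using less memory but giving up automatic
// retries of transient errors. Bucket and object names are placeholders.
package example

import (
	"context"

	"cloud.google.com/go/storage"
)

func uploadSmall(ctx context.Context, client *storage.Client, data []byte) error {
	w := client.Bucket("my-bucket").Object("state/tiny.json").NewWriter(ctx)
	w.ChunkSize = 0 // single-request upload; no retry buffer
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	// Close reports whether the upload actually succeeded.
	return w.Close()
}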
Also, since the +// build target is something non-existent, these should not be included in any +// binaries. + +package cloud + +import ( + _ "github.com/golang/protobuf/protoc-gen-go" + _ "github.com/jstemmer/go-junit-report" + _ "golang.org/x/lint/golint" + _ "golang.org/x/tools/cmd/goimports" +) diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE deleted file mode 100644 index 21253788..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go deleted file mode 100644 index ed749d4c..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ /dev/null @@ -1,218 +0,0 @@ -// Package cidr is a collection of assorted utilities for computing -// network and host addresses within network ranges. -// -// It expects a CIDR-type address structure where addresses are divided into -// some number of prefix bits representing the network and then the remaining -// suffix bits represent the host. -// -// For example, it can help to calculate addresses for sub-networks of a -// parent network, or to calculate host addresses within a particular prefix. -// -// At present this package is prioritizing simplicity of implementation and -// de-prioritizing speed and memory usage. Thus caution is advised before -// using this package in performance-critical applications or hot codepaths. -// Patches to improve the speed and memory usage may be accepted as long as -// they do not result in a significant increase in code complexity. -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -// Subnet takes a parent CIDR range and creates a subnet within it -// with the given number of additional prefix bits and the given -// network number. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number -// of 5, becomes 10.3.5.0/24 . 
-func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - newPrefixLen := parentLen + newBits - - if newPrefixLen > addrLen { - return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) - } - - maxNetNum := uint64(1< maxNetNum { - return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) - } - - return &net.IPNet{ - IP: insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen), - Mask: net.CIDRMask(newPrefixLen, addrLen), - }, nil -} - -// Host takes a parent CIDR range and turns it into a host IP address with -// the given host number. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func Host(base *net.IPNet, num int) (net.IP, error) { - ip := base.IP - mask := base.Mask - bigNum := big.NewInt(int64(num)) - - parentLen, addrLen := mask.Size() - hostLen := addrLen - parentLen - - maxHostNum := big.NewInt(int64(1)) - maxHostNum.Lsh(maxHostNum, uint(hostLen)) - maxHostNum.Sub(maxHostNum, big.NewInt(1)) - - numUint64 := big.NewInt(int64(bigNum.Uint64())) - if bigNum.Cmp(big.NewInt(0)) == -1 { - numUint64.Neg(bigNum) - numUint64.Sub(numUint64, big.NewInt(int64(1))) - bigNum.Sub(maxHostNum, numUint64) - } - - if numUint64.Cmp(maxHostNum) == 1 { - return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) - } - var bitlength int - if ip.To4() != nil { - bitlength = 32 - } else { - bitlength = 128 - } - return insertNumIntoIP(ip, bigNum, bitlength), nil -} - -// AddressRange returns the first and last addresses in the given CIDR range. -func AddressRange(network *net.IPNet) (net.IP, net.IP) { - // the first IP is easy - firstIP := network.IP - - // the last IP is the network address OR NOT the mask address - prefixLen, bits := network.Mask.Size() - if prefixLen == bits { - // Easy! - // But make sure that our two slices are distinct, since they - // would be in all other cases. - lastIP := make([]byte, len(firstIP)) - copy(lastIP, firstIP) - return firstIP, lastIP - } - - firstIPInt, bits := ipToInt(firstIP) - hostLen := uint(bits) - uint(prefixLen) - lastIPInt := big.NewInt(1) - lastIPInt.Lsh(lastIPInt, hostLen) - lastIPInt.Sub(lastIPInt, big.NewInt(1)) - lastIPInt.Or(lastIPInt, firstIPInt) - - return firstIP, intToIP(lastIPInt, bits) -} - -// AddressCount returns the number of distinct host addresses within the given -// CIDR range. -// -// Since the result is a uint64, this function returns meaningful information -// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. 
-func AddressCount(network *net.IPNet) uint64 { - prefixLen, bits := network.Mask.Size() - return 1 << (uint64(bits) - uint64(prefixLen)) -} - -//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies -//none of the subnets overlap and all subnets are in the supernet -//it returns an error if any of those conditions are not satisfied -func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { - firstLastIP := make([][]net.IP, len(subnets)) - for i, s := range subnets { - first, last := AddressRange(s) - firstLastIP[i] = []net.IP{first, last} - } - for i, s := range subnets { - if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { - return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) - } - for j := 0; j < len(subnets); j++ { - if i == j { - continue - } - - first := firstLastIP[j][0] - last := firstLastIP[j][1] - if s.Contains(first) || s.Contains(last) { - return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) - } - } - } - return nil -} - -// PreviousSubnet returns the subnet of the desired mask in the IP space -// just lower than the start of IPNet provided. If the IP space rolls over -// then the second return value is true -func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - startIP := checkIPv4(network.IP) - previousIP := make(net.IP, len(startIP)) - copy(previousIP, startIP) - cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) - previousIP = Dec(previousIP) - previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} - if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { - return previous, true - } - return previous, false -} - -// NextSubnet returns the next available subnet of the desired mask size -// starting for the maximum IP of the offset subnet -// If the IP exceeds the maxium IP then the second return value is true -func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - _, currentLast := AddressRange(network) - mask := net.CIDRMask(prefixLen, 8*len(currentLast)) - currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} - _, last := AddressRange(currentSubnet) - last = Inc(last) - next := &net.IPNet{IP: last.Mask(mask), Mask: mask} - if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { - return next, true - } - return next, false -} - -//Inc increases the IP by one this returns a new []byte for the IP -func Inc(IP net.IP) net.IP { - IP = checkIPv4(IP) - incIP := make([]byte, len(IP)) - copy(incIP, IP) - for j := len(incIP) - 1; j >= 0; j-- { - incIP[j]++ - if incIP[j] > 0 { - break - } - } - return incIP -} - -//Dec decreases the IP by one this returns a new []byte for the IP -func Dec(IP net.IP) net.IP { - IP = checkIPv4(IP) - decIP := make([]byte, len(IP)) - copy(decIP, IP) - decIP = checkIPv4(decIP) - for j := len(decIP) - 1; j >= 0; j-- { - decIP[j]-- - if decIP[j] < 255 { - break - } - } - return decIP -} - -func checkIPv4(ip net.IP) net.IP { - // Go for some reason allocs IPv6len for IPv4 so we have to correct it - if v4 := ip.To4(); v4 != nil { - return v4 - } - return ip -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go deleted file mode 100644 index e5e6a2cf..00000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ /dev/null @@ -1,37 +0,0 @@ -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -func ipToInt(ip net.IP) (*big.Int, int) { - val := &big.Int{} - 
val.SetBytes([]byte(ip)) - if len(ip) == net.IPv4len { - return val, 32 - } else if len(ip) == net.IPv6len { - return val, 128 - } else { - panic(fmt.Errorf("Unsupported address length %d", len(ip))) - } -} - -func intToIP(ipInt *big.Int, bits int) net.IP { - ipBytes := ipInt.Bytes() - ret := make([]byte, bits/8) - // Pack our IP bytes into the end of the return array, - // since big.Int.Bytes() removes front zero padding. - for i := 1; i <= len(ipBytes); i++ { - ret[len(ret)-i] = ipBytes[len(ipBytes)-i] - } - return net.IP(ret) -} - -func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { - ipInt, totalBits := ipToInt(ip) - bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) - ipInt.Or(ipInt, bigNum) - return intToIP(ipInt, totalBits) -} diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6..00000000 --- a/vendor/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE deleted file mode 100644 index a5df10e6..00000000 --- a/vendor/github.com/armon/go-radix/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md deleted file mode 100644 index 26f42a28..00000000 --- a/vendor/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,38 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
- -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod deleted file mode 100644 index 4336aa29..00000000 --- a/vendor/github.com/armon/go-radix/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/armon/go-radix diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go deleted file mode 100644 index e2bb22eb..00000000 --- a/vendor/github.com/armon/go-radix/radix.go +++ /dev/null @@ -1,540 +0,0 @@ -package radix - -import ( - "sort" - "strings" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type WalkFn func(s string, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - key string - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix string - - // Edges should be stored in-order for iteration. - // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf != nil -} - -func (n *node) addEdge(e edge) { - n.edges = append(n.edges, e) - n.edges.Sort() -} - -func (n *node) updateEdge(label byte, node *node) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - n.edges[idx].node = node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -func (n *node) delEdge(label byte) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - copy(n.edges[idx:], n.edges[idx+1:]) - n.edges[len(n.edges)-1] = edge{} - n.edges = n.edges[:len(n.edges)-1] - } -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. 
The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration, -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a newentry or update -// an existing entry. Returns if updated. -func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaution - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } - - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.updateEdge(search[0], child) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add to to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } -} - -// Delete is used to delete a key, returning the previous -// value and if it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - var parent *node - var label byte - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - parent = n - label = search[0] - n = n.getEdge(label) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should delete this node from the parent - if parent != nil && len(n.edges) == 0 { - parent.delEdge(label) - } - - // Check if we should merge this node - if n != t.root && len(n.edges) == 1 { - n.mergeChild() - } - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - - return leaf.val, true -} - -// DeletePrefix is used to delete the subtree under a prefix -// 
Returns how many nodes were deleted -// Use this to delete large subtrees efficiently -func (t *Tree) DeletePrefix(s string) int { - return t.deletePrefix(nil, t.root, s) -} - -// delete does a recursive deletion -func (t *Tree) deletePrefix(parent, n *node, prefix string) int { - // Check for key exhaustion - if len(prefix) == 0 { - // Remove the leaf node - subTreeSize := 0 - //recursively walk from all edges of the node to be deleted - recursiveWalk(n, func(s string, v interface{}) bool { - subTreeSize++ - return false - }) - if n.isLeaf() { - n.leaf = nil - } - n.edges = nil // deletes the entire subtree - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - t.size -= subTreeSize - return subTreeSize - } - - // Look for an edge - label := prefix[0] - child := n.getEdge(label) - if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { - return 0 - } - - // Consume the search prefix - if len(child.prefix) > len(prefix) { - prefix = prefix[len(prefix):] - } else { - prefix = prefix[len(child.prefix):] - } - return t.deletePrefix(n, child, prefix) -} - -func (n *node) mergeChild() { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. 
-func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaution - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - break - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaution - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. -func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaution - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. 
Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore deleted file mode 100644 index 9e131146..00000000 --- a/vendor/github.com/bgentry/speakeasy/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -example/example -example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE deleted file mode 100644 index 37d60fc3..00000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -MIT License - -Copyright (c) 2017 Blake Gentry - -This license applies to the non-Windows portions of this library. The Windows -portion maintains its own Apache 2.0 license. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS deleted file mode 100644 index ff177f61..00000000 --- a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [2013] [the CloudFoundry Authors] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md deleted file mode 100644 index fceda751..00000000 --- a/vendor/github.com/bgentry/speakeasy/Readme.md +++ /dev/null @@ -1,30 +0,0 @@ -# Speakeasy - -This package provides cross-platform Go (#golang) helpers for taking user input -from the terminal while not echoing the input back (similar to `getpasswd`). 
The -package uses syscalls to avoid any dependence on cgo, and is therefore -compatible with cross-compiling. - -[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] - -## Unicode - -Multi-byte unicode characters work successfully on Mac OS X. On Windows, -however, this may be problematic (as is UTF in general on Windows). Other -platforms have not been tested. - -## License - -The code herein was not written by me, but was compiled from two separate open -source packages. Unix portions were imported from [gopass][gopass], while -Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s -[Windows terminal helpers][cf-ui-windows]. - -The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly -from the source (though I attempted to fill in the correct owner in the -boilerplate copyright notice). - -[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" -[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" -[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" -[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go deleted file mode 100644 index 71c1dd1b..00000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy.go +++ /dev/null @@ -1,49 +0,0 @@ -package speakeasy - -import ( - "fmt" - "io" - "os" - "strings" -) - -// Ask the user to enter a password with input hidden. prompt is a string to -// display before the user's input. Returns the provided password, or an error -// if the command failed. -func Ask(prompt string) (password string, err error) { - return FAsk(os.Stdout, prompt) -} - -// FAsk is the same as Ask, except it is possible to specify the file to write -// the prompt to. If 'nil' is passed as the writer, no prompt will be written. -func FAsk(wr io.Writer, prompt string) (password string, err error) { - if wr != nil && prompt != "" { - fmt.Fprint(wr, prompt) // Display the prompt. - } - password, err = getPassword() - - // Carriage return after the user input. - if wr != nil { - fmt.Fprintln(wr, "") - } - return -} - -func readline() (value string, err error) { - var valb []byte - var n int - b := make([]byte, 1) - for { - // read one byte at a time so we don't accidentally read extra bytes - n, err = os.Stdin.Read(b) - if err != nil && err != io.EOF { - return "", err - } - if n == 0 || b[0] == '\n' { - break - } - valb = append(valb, b[0]) - } - - return strings.TrimSuffix(string(valb), "\r"), nil -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go deleted file mode 100644 index d99fda19..00000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go +++ /dev/null @@ -1,93 +0,0 @@ -// based on https://code.google.com/p/gopass -// Author: johnsiilver@gmail.com (John Doak) -// -// Original code is based on code by RogerV in the golang-nuts thread: -// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package speakeasy - -import ( - "fmt" - "os" - "os/signal" - "strings" - "syscall" -) - -const sttyArg0 = "/bin/stty" - -var ( - sttyArgvEOff = []string{"stty", "-echo"} - sttyArgvEOn = []string{"stty", "echo"} -) - -// getPassword gets input hidden from the terminal from a user. 
This is -// accomplished by turning off terminal echo, reading input from the user and -// finally turning on terminal echo. -func getPassword() (password string, err error) { - sig := make(chan os.Signal, 10) - brk := make(chan bool) - - // File descriptors for stdin, stdout, and stderr. - fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} - - // Setup notifications of termination signals to channel sig, create a process to - // watch for these signals so we can turn back on echo if need be. - signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, - syscall.SIGTERM) - go catchSignal(fd, sig, brk) - - // Turn off the terminal echo. - pid, err := echoOff(fd) - if err != nil { - return "", err - } - - // Turn on the terminal echo and stop listening for signals. - defer signal.Stop(sig) - defer close(brk) - defer echoOn(fd) - - syscall.Wait4(pid, nil, 0, nil) - - line, err := readline() - if err == nil { - password = strings.TrimSpace(line) - } else { - err = fmt.Errorf("failed during password entry: %s", err) - } - - return password, err -} - -// echoOff turns off the terminal echo. -func echoOff(fd []uintptr) (int, error) { - pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) - if err != nil { - return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) - } - return pid, nil -} - -// echoOn turns back on the terminal echo. -func echoOn(fd []uintptr) { - // Turn on the terminal echo. - pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) - if e == nil { - syscall.Wait4(pid, nil, 0, nil) - } -} - -// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn -// terminal echo back on before the program ends. Otherwise the user is left -// with echo off on their terminal. 
-func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { - select { - case <-sig: - echoOn(fd) - os.Exit(-1) - case <-brk: - } -} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go deleted file mode 100644 index c2093a80..00000000 --- a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package speakeasy - -import ( - "syscall" -) - -// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx -const ENABLE_ECHO_INPUT = 0x0004 - -func getPassword() (password string, err error) { - var oldMode uint32 - - err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) - if err != nil { - return - } - - var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) - - err = setConsoleMode(syscall.Stdin, newMode) - defer setConsoleMode(syscall.Stdin, oldMode) - if err != nil { - return - } - - return readline() -} - -func setConsoleMode(console syscall.Handle, mode uint32) (err error) { - dll := syscall.MustLoadDLL("kernel32") - proc := dll.MustFindProc("SetConsoleMode") - r, _, err := proc.Call(uintptr(console), uintptr(mode)) - - if r == 0 { - return err - } - return nil -} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml deleted file mode 100644 index 102fb9a6..00000000 --- a/vendor/github.com/blang/semver/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -matrix: - include: - - go: 1.4.3 - - go: 1.5.4 - - go: 1.6.3 - - go: 1.7 - - go: tip - allow_failures: - - go: tip -install: -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -script: -- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci - -repotoken $COVERALLS_TOKEN -- echo "Build examples" ; cd examples && go build -- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) -env: - global: - secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86f..00000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 08b2e4a3..00000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` -- Wildcards `>=1.x`, `<=2.5.x` -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - -Ranges ------- - -A `Range` is a set of conditions which specify which versions satisfy the range. - -A condition is composed of an operator and a version. The supported operators are: - -- `<1.0.0` Less than `1.0.0` -- `<=1.0.0` Less than or equal to `1.0.0` -- `>1.0.0` Greater than `1.0.0` -- `>=1.0.0` Greater than or equal to `1.0.0` -- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` -- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. - -Note that spaces between the operator and the version will be gracefully tolerated. - -A `Range` can link multiple `Ranges` separated by space: - -Ranges can be linked by logical AND: - - - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` - - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` - -Ranges can also be linked by logical OR: - - - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` - -AND has a higher precedence than OR. It's not possible to use brackets. 
- -Ranges can be combined by both AND and OR - - - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` - -Range usage: - -``` -v, err := semver.Parse("1.2.3") -range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if range(v) { - //valid -} - -``` - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" - -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - - -Benchmarks ------ - - BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op - BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op - BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op - BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op - BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op - BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op - BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op - BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op - BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op - BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op - BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. 
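
The removed semver README's "Range usage" snippet names its variable `range`, which is a reserved word in Go and would not compile as written. A minimal, self-contained sketch of the same check — assuming only the `Parse` and `ParseRange` functions defined in the removed `semver.go`/`range.go`, with illustrative version strings — looks like this:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Parse a concrete version.
	v, err := semver.Parse("1.2.3")
	if err != nil {
		panic(err)
	}

	// AND is expressed by a space, OR by "||"; AND binds tighter than OR.
	validRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}

	// A Range is a func(Version) bool, so it is invoked directly.
	fmt.Println(validRange(v)) // true: 1.2.3 satisfies >1.0.0 <2.0.0
}
```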
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c4..00000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. -func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json deleted file mode 100644 index 1cf8ebdd..00000000 --- a/vendor/github.com/blang/semver/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "author": "blang", - "bugs": { - "URL": "https://github.com/blang/semver/issues", - "url": "https://github.com/blang/semver/issues" - }, - "gx": { - "dvcsimport": "github.com/blang/semver" - }, - "gxVersion": "0.10.0", - "language": "go", - "license": "MIT", - "name": "semver", - "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", - "version": "3.5.1" -} - diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index fca406d4..00000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. 
-// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. -// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. 
-func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Input must be free of leading or trailing spaces. -func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 
1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Index(ap, "x") != -1 { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842e..00000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) 
- } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// 
specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f8808..00000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d8026..00000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/bmatcuk/doublestar/.gitignore b/vendor/github.com/bmatcuk/doublestar/.gitignore deleted file mode 100644 index af212ecc..00000000 --- a/vendor/github.com/bmatcuk/doublestar/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# vi -*~ -*.swp -*.swo - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# test directory -test/ diff --git a/vendor/github.com/bmatcuk/doublestar/.travis.yml b/vendor/github.com/bmatcuk/doublestar/.travis.yml deleted file mode 100644 index ec4fee88..00000000 --- a/vendor/github.com/bmatcuk/doublestar/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.11 - - 1.12 - -before_install: - - go get -t -v ./... - -script: - - go test -race -coverprofile=coverage.txt -covermode=atomic - -after_success: - - bash <(curl -s https://codecov.io/bash) - diff --git a/vendor/github.com/bmatcuk/doublestar/LICENSE b/vendor/github.com/bmatcuk/doublestar/LICENSE deleted file mode 100644 index 309c9d1d..00000000 --- a/vendor/github.com/bmatcuk/doublestar/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Bob Matcuk - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/bmatcuk/doublestar/README.md b/vendor/github.com/bmatcuk/doublestar/README.md deleted file mode 100644 index 8e365c5e..00000000 --- a/vendor/github.com/bmatcuk/doublestar/README.md +++ /dev/null @@ -1,109 +0,0 @@ -![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master) -[![Build Status](https://travis-ci.org/bmatcuk/doublestar.svg?branch=master)](https://travis-ci.org/bmatcuk/doublestar) -[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master) - -# doublestar - -**doublestar** is a [golang](http://golang.org/) implementation of path pattern -matching and globbing with support for "doublestar" (aka globstar: `**`) -patterns. - -doublestar patterns match files and directories recursively. For example, if -you had the following directory structure: - -``` -grandparent -`-- parent - |-- child1 - `-- child2 -``` - -You could find the children with patterns such as: `**/child*`, -`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will -return all files and directories recursively). - -Bash's globstar is doublestar's inspiration and, as such, works similarly. -Note that the doublestar must appear as a path component by itself. A pattern -such as `/path**` is invalid and will be treated the same as `/path*`, but -`/path*/**` should achieve the desired result. Additionally, `/path/**` will -match all directories and files under the path directory, but `/path/**/` will -only match directories. - -## Installation - -**doublestar** can be installed via `go get`: - -```bash -go get github.com/bmatcuk/doublestar -``` - -To use it in your code, you must import it: - -```go -import "github.com/bmatcuk/doublestar" -``` - -## Functions - -### Match -```go -func Match(pattern, name string) (bool, error) -``` - -Match returns true if `name` matches the file name `pattern` -([see below](#patterns)). `name` and `pattern` are split on forward slash (`/`) -characters and may be relative or absolute. - -Note: `Match()` is meant to be a drop-in replacement for `path.Match()`. As -such, it always uses `/` as the path separator. If you are writing code that -will run on systems where `/` is not the path separator (such as Windows), you -want to use `PathMatch()` (below) instead. - - -### PathMatch -```go -func PathMatch(pattern, name string) (bool, error) -``` - -PathMatch returns true if `name` matches the file name `pattern` -([see below](#patterns)). The difference between Match and PathMatch is that -PathMatch will automatically use your system's path separator to split `name` -and `pattern`. - -`PathMatch()` is meant to be a drop-in replacement for `filepath.Match()`. - -### Glob -```go -func Glob(pattern string) ([]string, error) -``` - -Glob finds all files and directories in the filesystem that match `pattern` -([see below](#patterns)). `pattern` may be relative (to the current working -directory), or absolute. - -`Glob()` is meant to be a drop-in replacement for `filepath.Glob()`. 
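The `Match`, `PathMatch`, and `Glob` functions described above are the whole of the doublestar API this vendor update drops. A minimal sketch of how that API is exercised, assuming only the v1 import path and the `grandparent/parent/child*` directory layout shown in the README above:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar" // v1 import path, as documented above
)

func main() {
	// Match splits pattern and name on '/' regardless of OS;
	// PathMatch would use the system path separator instead.
	ok, err := doublestar.Match("grandparent/**/child?", "grandparent/parent/child1")
	if err != nil {
		panic(err) // ErrBadPattern is the only possible error
	}
	fmt.Println(ok) // true

	// Glob walks the filesystem relative to the working directory.
	matches, err := doublestar.Glob("grandparent/**")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // files and directories under grandparent, recursively
}
```

As the deleted sources note, `Glob` ignores filesystem errors while walking; like `Match`, it reports only `ErrBadPattern` when the pattern itself is malformed.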
- -## Patterns - -**doublestar** supports the following special terms in the patterns: - -Special Terms | Meaning -------------- | ------- -`*` | matches any sequence of non-path-separators -`**` | matches any sequence of characters, including path separators -`?` | matches any single non-path-separator character -`[class]` | matches any single non-path-separator character against a class of characters ([see below](#character-classes)) -`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches - -Any character with a special meaning can be escaped with a backslash (`\`). - -### Character Classes - -Character classes support the following: - -Class | Meaning ----------- | ------- -`[abc]` | matches any single character within the set -`[a-z]` | matches any single character in the range -`[^class]` | matches any single character which does *not* match the class - diff --git a/vendor/github.com/bmatcuk/doublestar/doublestar.go b/vendor/github.com/bmatcuk/doublestar/doublestar.go deleted file mode 100644 index 0044dfa8..00000000 --- a/vendor/github.com/bmatcuk/doublestar/doublestar.go +++ /dev/null @@ -1,476 +0,0 @@ -package doublestar - -import ( - "fmt" - "os" - "path" - "path/filepath" - "strings" - "unicode/utf8" -) - -// ErrBadPattern indicates a pattern was malformed. -var ErrBadPattern = path.ErrBadPattern - -// Split a path on the given separator, respecting escaping. -func splitPathOnSeparator(path string, separator rune) (ret []string) { - idx := 0 - if separator == '\\' { - // if the separator is '\\', then we can just split... - ret = strings.Split(path, string(separator)) - idx = len(ret) - } else { - // otherwise, we need to be careful of situations where the separator was escaped - cnt := strings.Count(path, string(separator)) - if cnt == 0 { - return []string{path} - } - - ret = make([]string, cnt+1) - pathlen := len(path) - separatorLen := utf8.RuneLen(separator) - emptyEnd := false - for start := 0; start < pathlen; { - end := indexRuneWithEscaping(path[start:], separator) - if end == -1 { - emptyEnd = false - end = pathlen - } else { - emptyEnd = true - end += start - } - ret[idx] = path[start:end] - start = end + separatorLen - idx++ - } - - // If the last rune is a path separator, we need to append an empty string to - // represent the last, empty path component. By default, the strings from - // make([]string, ...) will be empty, so we just need to icrement the count - if emptyEnd { - idx++ - } - } - - return ret[:idx] -} - -// Find the first index of a rune in a string, -// ignoring any times the rune is escaped using "\". -func indexRuneWithEscaping(s string, r rune) int { - end := strings.IndexRune(s, r) - if end == -1 { - return -1 - } - if end > 0 && s[end-1] == '\\' { - start := end + utf8.RuneLen(r) - end = indexRuneWithEscaping(s[start:], r) - if end != -1 { - end += start - } - } - return end -} - -// Match returns true if name matches the shell file name pattern. -// The pattern syntax is: -// -// pattern: -// { term } -// term: -// '*' matches any sequence of non-path-separators -// '**' matches any sequence of characters, including -// path separators. -// '?' matches any single non-path-separator character -// '[' [ '^' ] { character-range } ']' -// character class (must be non-empty) -// '{' { term } [ ',' { term } ... 
] '}' -// c matches character c (c != '*', '?', '\\', '[') -// '\\' c matches character c -// -// character-range: -// c matches character c (c != '\\', '-', ']') -// '\\' c matches character c -// lo '-' hi matches character c for lo <= c <= hi -// -// Match requires pattern to match all of name, not just a substring. -// The path-separator defaults to the '/' character. The only possible -// returned error is ErrBadPattern, when pattern is malformed. -// -// Note: this is meant as a drop-in replacement for path.Match() which -// always uses '/' as the path separator. If you want to support systems -// which use a different path separator (such as Windows), what you want -// is the PathMatch() function below. -// -func Match(pattern, name string) (bool, error) { - return matchWithSeparator(pattern, name, '/') -} - -// PathMatch is like Match except that it uses your system's path separator. -// For most systems, this will be '/'. However, for Windows, it would be '\\'. -// Note that for systems where the path separator is '\\', escaping is -// disabled. -// -// Note: this is meant as a drop-in replacement for filepath.Match(). -// -func PathMatch(pattern, name string) (bool, error) { - return matchWithSeparator(pattern, name, os.PathSeparator) -} - -// Match returns true if name matches the shell file name pattern. -// The pattern syntax is: -// -// pattern: -// { term } -// term: -// '*' matches any sequence of non-path-separators -// '**' matches any sequence of characters, including -// path separators. -// '?' matches any single non-path-separator character -// '[' [ '^' ] { character-range } ']' -// character class (must be non-empty) -// '{' { term } [ ',' { term } ... ] '}' -// c matches character c (c != '*', '?', '\\', '[') -// '\\' c matches character c -// -// character-range: -// c matches character c (c != '\\', '-', ']') -// '\\' c matches character c, unless separator is '\\' -// lo '-' hi matches character c for lo <= c <= hi -// -// Match requires pattern to match all of name, not just a substring. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -func matchWithSeparator(pattern, name string, separator rune) (bool, error) { - patternComponents := splitPathOnSeparator(pattern, separator) - nameComponents := splitPathOnSeparator(name, separator) - return doMatching(patternComponents, nameComponents) -} - -func doMatching(patternComponents, nameComponents []string) (matched bool, err error) { - // check for some base-cases - patternLen, nameLen := len(patternComponents), len(nameComponents) - if patternLen == 0 && nameLen == 0 { - return true, nil - } - if patternLen == 0 || nameLen == 0 { - return false, nil - } - - patIdx, nameIdx := 0, 0 - for patIdx < patternLen && nameIdx < nameLen { - if patternComponents[patIdx] == "**" { - // if our last pattern component is a doublestar, we're done - - // doublestar will match any remaining name components, if any. 
- if patIdx++; patIdx >= patternLen { - return true, nil - } - - // otherwise, try matching remaining components - for ; nameIdx < nameLen; nameIdx++ { - if m, _ := doMatching(patternComponents[patIdx:], nameComponents[nameIdx:]); m { - return true, nil - } - } - return false, nil - } - - // try matching components - matched, err = matchComponent(patternComponents[patIdx], nameComponents[nameIdx]) - if !matched || err != nil { - return - } - - patIdx++ - nameIdx++ - } - return patIdx >= patternLen && nameIdx >= nameLen, nil -} - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of pattern is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// Your system path separator is automatically used. This means on -// systems where the separator is '\\' (Windows), escaping will be -// disabled. -// -// Note: this is meant as a drop-in replacement for filepath.Glob(). -// -func Glob(pattern string) (matches []string, err error) { - patternComponents := splitPathOnSeparator(filepath.ToSlash(pattern), '/') - if len(patternComponents) == 0 { - return nil, nil - } - - // On Windows systems, this will return the drive name ('C:') for filesystem - // paths, or \\\ for UNC paths. On other systems, it will - // return an empty string. Since absolute paths on non-Windows systems start - // with a slash, patternComponent[0] == volumeName will return true for both - // absolute Windows paths and absolute non-Windows paths, but we need a - // separate check for UNC paths. - volumeName := filepath.VolumeName(pattern) - isWindowsUNC := strings.HasPrefix(pattern, `\\`) - if isWindowsUNC || patternComponents[0] == volumeName { - startComponentIndex := 1 - if isWindowsUNC { - startComponentIndex = 4 - } - return doGlob(fmt.Sprintf("%s%s", volumeName, string(os.PathSeparator)), patternComponents[startComponentIndex:], matches) - } - - // otherwise, it's a relative pattern - return doGlob(".", patternComponents, matches) -} - -// Perform a glob -func doGlob(basedir string, components, matches []string) (m []string, e error) { - m = matches - e = nil - - // figure out how many components we don't need to glob because they're - // just names without patterns - we'll use os.Lstat below to check if that - // path actually exists - patLen := len(components) - patIdx := 0 - for ; patIdx < patLen; patIdx++ { - if strings.IndexAny(components[patIdx], "*?[{\\") >= 0 { - break - } - } - if patIdx > 0 { - basedir = filepath.Join(basedir, filepath.Join(components[0:patIdx]...)) - } - - // Lstat will return an error if the file/directory doesn't exist - fi, err := os.Lstat(basedir) - if err != nil { - return - } - - // if there are no more components, we've found a match - if patIdx >= patLen { - m = append(m, basedir) - return - } - - // otherwise, we need to check each item in the directory... - // first, if basedir is a symlink, follow it... - if (fi.Mode() & os.ModeSymlink) != 0 { - fi, err = os.Stat(basedir) - if err != nil { - return - } - } - - // confirm it's a directory... 
- if !fi.IsDir() { - return - } - - // read directory - dir, err := os.Open(basedir) - if err != nil { - return - } - defer dir.Close() - - files, _ := dir.Readdir(-1) - lastComponent := (patIdx + 1) >= patLen - if components[patIdx] == "**" { - // if the current component is a doublestar, we'll try depth-first - for _, file := range files { - // if symlink, we may want to follow - if (file.Mode() & os.ModeSymlink) != 0 { - file, err = os.Stat(filepath.Join(basedir, file.Name())) - if err != nil { - continue - } - } - - if file.IsDir() { - // recurse into directories - if lastComponent { - m = append(m, filepath.Join(basedir, file.Name())) - } - m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx:], m) - } else if lastComponent { - // if the pattern's last component is a doublestar, we match filenames, too - m = append(m, filepath.Join(basedir, file.Name())) - } - } - if lastComponent { - return // we're done - } - patIdx++ - lastComponent = (patIdx + 1) >= patLen - } - - // check items in current directory and recurse - var match bool - for _, file := range files { - match, e = matchComponent(components[patIdx], file.Name()) - if e != nil { - return - } - if match { - if lastComponent { - m = append(m, filepath.Join(basedir, file.Name())) - } else { - m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx+1:], m) - } - } - } - return -} - -// Attempt to match a single pattern component with a path component -func matchComponent(pattern, name string) (bool, error) { - // check some base cases - patternLen, nameLen := len(pattern), len(name) - if patternLen == 0 && nameLen == 0 { - return true, nil - } - if patternLen == 0 { - return false, nil - } - if nameLen == 0 && pattern != "*" { - return false, nil - } - - // check for matches one rune at a time - patIdx, nameIdx := 0, 0 - for patIdx < patternLen && nameIdx < nameLen { - patRune, patAdj := utf8.DecodeRuneInString(pattern[patIdx:]) - nameRune, nameAdj := utf8.DecodeRuneInString(name[nameIdx:]) - if patRune == '\\' { - // handle escaped runes - patIdx += patAdj - patRune, patAdj = utf8.DecodeRuneInString(pattern[patIdx:]) - if patRune == utf8.RuneError { - return false, ErrBadPattern - } else if patRune == nameRune { - patIdx += patAdj - nameIdx += nameAdj - } else { - return false, nil - } - } else if patRune == '*' { - // handle stars - if patIdx += patAdj; patIdx >= patternLen { - // a star at the end of a pattern will always - // match the rest of the path - return true, nil - } - - // check if we can make any matches - for ; nameIdx < nameLen; nameIdx += nameAdj { - if m, _ := matchComponent(pattern[patIdx:], name[nameIdx:]); m { - return true, nil - } - } - return false, nil - } else if patRune == '[' { - // handle character sets - patIdx += patAdj - endClass := indexRuneWithEscaping(pattern[patIdx:], ']') - if endClass == -1 { - return false, ErrBadPattern - } - endClass += patIdx - classRunes := []rune(pattern[patIdx:endClass]) - classRunesLen := len(classRunes) - if classRunesLen > 0 { - classIdx := 0 - matchClass := false - if classRunes[0] == '^' { - classIdx++ - } - for classIdx < classRunesLen { - low := classRunes[classIdx] - if low == '-' { - return false, ErrBadPattern - } - classIdx++ - if low == '\\' { - if classIdx < classRunesLen { - low = classRunes[classIdx] - classIdx++ - } else { - return false, ErrBadPattern - } - } - high := low - if classIdx < classRunesLen && classRunes[classIdx] == '-' { - // we have a range of runes - if classIdx++; classIdx >= classRunesLen { - return false, 
ErrBadPattern - } - high = classRunes[classIdx] - if high == '-' { - return false, ErrBadPattern - } - classIdx++ - if high == '\\' { - if classIdx < classRunesLen { - high = classRunes[classIdx] - classIdx++ - } else { - return false, ErrBadPattern - } - } - } - if low <= nameRune && nameRune <= high { - matchClass = true - } - } - if matchClass == (classRunes[0] == '^') { - return false, nil - } - } else { - return false, ErrBadPattern - } - patIdx = endClass + 1 - nameIdx += nameAdj - } else if patRune == '{' { - // handle alternatives such as {alt1,alt2,...} - patIdx += patAdj - endOptions := indexRuneWithEscaping(pattern[patIdx:], '}') - if endOptions == -1 { - return false, ErrBadPattern - } - endOptions += patIdx - options := splitPathOnSeparator(pattern[patIdx:endOptions], ',') - patIdx = endOptions + 1 - for _, o := range options { - m, e := matchComponent(o+pattern[patIdx:], name[nameIdx:]) - if e != nil { - return false, e - } - if m { - return true, nil - } - } - return false, nil - } else if patRune == '?' || patRune == nameRune { - // handle single-rune wildcard - patIdx += patAdj - nameIdx += nameAdj - } else { - return false, nil - } - } - if patIdx >= patternLen && nameIdx >= nameLen { - return true, nil - } - if nameIdx >= nameLen && pattern[patIdx:] == "*" || pattern[patIdx:] == "**" { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/bmatcuk/doublestar/go.mod b/vendor/github.com/bmatcuk/doublestar/go.mod deleted file mode 100644 index ce1688f7..00000000 --- a/vendor/github.com/bmatcuk/doublestar/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/bmatcuk/doublestar - -go 1.12 diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 00000000..eac1c766 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. 
+type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go b/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go new file mode 100644 index 00000000..fd2f51d8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go @@ -0,0 +1,398 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gengogrpc contains the gRPC code generator. +package gengogrpc + +import ( + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/compiler/protogen" + + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + contextPackage = protogen.GoImportPath("context") + grpcPackage = protogen.GoImportPath("google.golang.org/grpc") + codesPackage = protogen.GoImportPath("google.golang.org/grpc/codes") + statusPackage = protogen.GoImportPath("google.golang.org/grpc/status") +) + +// GenerateFile generates a _grpc.pb.go file containing gRPC service definitions. 
+func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + if len(file.Services) == 0 { + return nil + } + filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.") + g.P() + g.P("package ", file.GoPackageName) + g.P() + GenerateFileContent(gen, file, g) + return g +} + +// GenerateFileContent generates the gRPC service definitions, excluding the package statement. +func GenerateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { + if len(file.Services) == 0 { + return + } + + // TODO: Remove this. We don't need to include these references any more. + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPackage.Ident("Context")) + g.P("var _ ", grpcPackage.Ident("ClientConnInterface")) + g.P() + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion6")) + g.P() + for _, service := range file.Services { + genService(gen, file, g, service) + } +} + +func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + clientName := service.GoName + "Client" + + g.P("// ", clientName, " is the client API for ", service.GoName, " service.") + g.P("//") + g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.") + + // Client interface. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(clientName, service.Location) + g.P("type ", clientName, " interface {") + for _, method := range service.Methods { + g.Annotate(clientName+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + clientSignature(g, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(clientName), " struct {") + g.P("cc ", grpcPackage.Ident("ClientConnInterface")) + g.P("}") + g.P() + + // NewClient factory. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") + g.P("return &", unexport(clientName), "{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + // Client method implementations. + for _, method := range service.Methods { + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + // Unary RPC method + genClientMethod(gen, file, g, method, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + genClientMethod(gen, file, g, method, streamIndex) + streamIndex++ + } + } + + // Server interface. 
+ serverType := service.GoName + "Server" + g.P("// ", serverType, " is the server API for ", service.GoName, " service.") + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(serverType, service.Location) + g.P("type ", serverType, " interface {") + for _, method := range service.Methods { + g.Annotate(serverType+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + serverSignature(g, method)) + } + g.P("}") + g.P() + + // Server Unimplemented struct for forward compatibility. + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + for _, method := range service.Methods { + nilArg := "" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + nilArg = "nil," + } + g.P("func (*Unimplemented", serverType, ") ", serverSignature(g, method), "{") + g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) + g.P("}") + } + g.P() + + // Server registration. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + serviceDescVar := "_" + service.GoName + "_serviceDesc" + g.P("func Register", service.GoName, "Server(s *", grpcPackage.Ident("Server"), ", srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Methods { + hname := genServerMethod(gen, file, g, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. + g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") + g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") + for i, method := range service.Methods { + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") + for i, method := range service.Methods { + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.Desc.IsStreamingServer() { + g.P("ServerStreams: true,") + } + if method.Desc.IsStreamingClient() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.Desc.Path(), "\",") + g.P("}") + g.P() +} + +func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + s := method.GoName + "(ctx " + g.QualifiedGoIdent(contextPackage.Ident("Context")) + if !method.Desc.IsStreamingClient() { + s += ", in *" + g.QualifiedGoIdent(method.Input.GoIdent) + } + s += ", opts ..." 
+ g.QualifiedGoIdent(grpcPackage.Ident("CallOption")) + ") (" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + s += "*" + g.QualifiedGoIdent(method.Output.GoIdent) + } else { + s += method.Parent.GoName + "_" + method.GoName + "Client" + } + s += ", error)" + return s +} + +func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { + service := method.Parent + sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + g.P("out := new(", method.Output.GoIdent, ")") + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(service.GoName) + method.GoName + "Client" + serviceDescVar := "_" + service.GoName + "_serviceDesc" + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.Desc.IsStreamingClient() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingClient() + genRecv := method.Desc.IsStreamingServer() + genCloseAndRecv := !method.Desc.IsStreamingServer() + + // Stream auxiliary types and methods. + g.P("type ", service.GoName, "_", method.GoName, "Client interface {") + if genSend { + g.P("Send(*", method.Input.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Output.GoIdent, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", method.Output.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + var reqArgs []string + ret := "error" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, g.QualifiedGoIdent(contextPackage.Ident("Context"))) + ret = "(*" + g.QualifiedGoIdent(method.Output.GoIdent) + ", error)" + } + if !method.Desc.IsStreamingClient() { + reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent)) + } + if method.Desc.IsStreamingClient() || 
method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server") + } + return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string { + service := method.Parent + hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) + + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") + g.P("in := new(", method.Input.GoIdent, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") + g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.GoName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(service.GoName) + method.GoName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") + if !method.Desc.IsStreamingClient() { + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingServer() + genSendAndClose := !method.Desc.IsStreamingServer() + genRecv := method.Desc.IsStreamingClient() + + // Stream auxiliary types and methods. + g.P("type ", service.GoName, "_", method.GoName, "Server interface {") + if genSend { + g.P("Send(*", method.Output.GoIdent, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", method.Output.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Input.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {") + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} + +const deprecationComment = "// Deprecated: Do not use." 
+ +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 00000000..e810e6fe --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. 
+func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. 
+func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). 
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f..00000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. 
- // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. 
- elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08b..00000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 00000000..d399bf06 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. 
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go index 35b882c0..e8db57e0 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -1,63 +1,113 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -import "errors" +import ( + "encoding/json" + "errors" + "fmt" + "strconv" -// Deprecated: do not use. + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } -// Deprecated: do not use. +// Deprecated: Do not use. func GetStats() Stats { return Stats{} } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSet(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSet([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. 
func MarshalMessageSetJSON(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSetJSON([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617c..2187e877 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. 
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. 
+ if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2c..00000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. 
-func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. 
-func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41b..00000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. 
- -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add3..42fc120c 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,607 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. 
+ ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. 
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + return has } -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. 
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } } - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: return nil, ErrMissingExtension } - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } + return v, nil +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. 
- return sf.value, nil - } +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByName(field) } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) if err != nil { - return + if err == ErrMissingExtension { + continue + } + return vs, err } + vs[i] = v } - return + return vs, nil } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable } - registeredExtensions := RegisteredExtensions(pb) - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } - - extensions = append(extensions, desc) } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) return nil } -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] } -} -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
+ ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } - m[desc.Field] = desc + return xts, nil } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. 
- switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } + return true + default: + return false } - return v } -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - return v + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 70fbda53..00000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexicographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a7567..00000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa9194..00000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe07..00000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index a4b8c0cd..dcdc2202 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,162 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. 
OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. 
+ // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -170,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -189,356 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } + + prop.Prop = append(prop.Prop, p) } - // Re-order prop.order. - sort.Sort(prop) + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 00000000..5aee89c3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 00000000..1e7ff642 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa9..00000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. 
- t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. 
- sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. 
-func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - 
return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case 
reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + 
SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := 
*ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func 
sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6..00000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. 
-func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
- /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc5..00000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index d97f9b35..00000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,845 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 00000000..4a593100 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new 
file mode 100644 index 00000000..a31134ee --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af..00000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 00000000..d7c28da5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 00000000..398e3485 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. 
+func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index d371d569..63dc0578 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,2889 +1,200 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto +// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto package descriptor import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/descriptor.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type -type FieldDescriptorProto_Type int32 +const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT +const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 +const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 +const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 +const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 +const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 +const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL +const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING +const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP +const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE +const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES +const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 +const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM +const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 +const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 +const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 +const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. 
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. 
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{0} -} - -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. 
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{1} -} - -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - 
return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2} -} - -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - 
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
-type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 1} -} - -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. 
This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4} -} - -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{5} -} - -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6} -} - -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6, 0} -} - -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{7} -} - -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{8} -} - -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{9} -} - -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} +var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name +var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18} -} - -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18, 0} -} - -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19} -} - -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. 
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19, 0} -} - -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20} -} +const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED +const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} +var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name +var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo +type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} +const FileOptions_SPEED = descriptorpb.FileOptions_SPEED +const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE +const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. 
- SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name +var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20, 0} -} +type FieldOptions_CType = descriptorpb.FieldOptions_CType -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} +const FieldOptions_STRING = descriptorpb.FieldOptions_STRING +const FieldOptions_CORD = descriptorpb.FieldOptions_CORD +const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo +var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name +var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} +type FieldOptions_JSType = descriptorpb.FieldOptions_JSType -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} +const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL +const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING +const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} +var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name +var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} +type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - 
proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} +const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN +const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS +const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT -func init() { - proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) -} +var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name +var MethodOptions_IdempotencyLevel_value = 
descriptorpb.MethodOptions_IdempotencyLevel_value -var fileDescriptor_e5baabe45344a177 = []byte{ - // 2589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, - 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, - 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, - 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, - 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, - 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, - 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, - 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, - 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, - 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, - 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, - 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, - 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, - 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, - 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, - 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, - 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, - 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, - 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, - 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, - 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, - 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, - 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, - 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, - 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, - 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, - 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, - 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, - 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, - 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, - 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, - 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, - 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, - 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 
0x92, 0x4d, 0xfc, 0x06, - 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, - 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, - 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, - 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, - 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, - 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, - 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, - 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, - 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, - 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, - 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, - 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, - 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, - 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, - 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, - 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, - 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, - 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, - 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, - 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b, - 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, - 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, - 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, - 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, - 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, - 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, - 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, - 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, - 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, - 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, - 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, - 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, - 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, - 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, - 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, - 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, - 
0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, - 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, - 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, - 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, - 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, - 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, - 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, - 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, - 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, - 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, - 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, - 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, - 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, - 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, - 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, - 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, - 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, - 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, - 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, - 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, - 0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, - 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, - 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, - 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, - 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, - 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, - 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, - 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, - 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, - 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, - 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, - 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, - 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, - 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, - 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, - 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, - 0x1f, 0xe4, 0x8a, 0x6f, 
0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, - 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, - 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, - 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, - 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, - 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, - 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, - 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, - 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, - 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, - 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, - 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, - 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, - 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, - 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, - 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, - 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, - 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, - 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, - 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, - 0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, - 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, - 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, - 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, - 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, - 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, - 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, - 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, - 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, - 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, - 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, - 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, - 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, - 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, - 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, - 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, - 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 
0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, - 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, - 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, - 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, - 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, - 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, - 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, - 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, - 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, - 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, - 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, - 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, - 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, - 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, - 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, - 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, - 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, - 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, +type FileDescriptorSet = descriptorpb.FileDescriptorSet +type FileDescriptorProto = descriptorpb.FileDescriptorProto +type DescriptorProto = descriptorpb.DescriptorProto +type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions +type FieldDescriptorProto = descriptorpb.FieldDescriptorProto +type OneofDescriptorProto = descriptorpb.OneofDescriptorProto +type EnumDescriptorProto = descriptorpb.EnumDescriptorProto +type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto +type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto +type MethodDescriptorProto = descriptorpb.MethodDescriptorProto + +const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming +const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming + +type FileOptions = descriptorpb.FileOptions + +const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles +const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 +const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor +const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices +const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices +const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices +const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices +const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated +const Default_FileOptions_CcEnableArenas = 
descriptorpb.Default_FileOptions_CcEnableArenas + +type MessageOptions = descriptorpb.MessageOptions + +const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat +const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor +const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated + +type FieldOptions = descriptorpb.FieldOptions + +const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype +const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype +const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated +const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak + +type OneofOptions = descriptorpb.OneofOptions +type EnumOptions = descriptorpb.EnumOptions + +const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated + +type EnumValueOptions = descriptorpb.EnumValueOptions + +const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated + +type ServiceOptions = descriptorpb.ServiceOptions + +const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated + +type MethodOptions = descriptorpb.MethodOptions + +const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated +const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel + +type UninterpretedOption = descriptorpb.UninterpretedOption +type SourceCodeInfo = descriptorpb.SourceCodeInfo +type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo +type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange +type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location +type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation + +var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 
0x32, +} + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } +func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { + if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index a2102d7a..00000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,885 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. 
Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - } - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - } - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. 
-message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default = false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default = false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. 
Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default = false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default = false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default = SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. 
- optional bool cc_generic_services = 16 [default = false]; - optional bool java_generic_services = 17 [default = false]; - optional bool py_generic_services = 18 [default = false]; - optional bool php_generic_services = 42 [default = false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default = false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default = false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - optional string php_metadata_namespace = 44; - - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - optional string ruby_package = 45; - - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. - // See the documentation for the "Options" section above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default = false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. 
This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default = false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default = false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. 
It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default = false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default = false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default = false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default = false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default = false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default = false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default = false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = 34 - [default = IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". 
- message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. 
For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed = true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed = true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. 
- repeated int32 path = 1 [packed = true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go new file mode 100644 index 00000000..d45b719d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go @@ -0,0 +1,74 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Install it by building this program and making it accessible within +// your PATH with the name: +// protoc-gen-go +// +// The 'go' suffix becomes part of the argument for the protocol compiler, +// such that it can be invoked as: +// protoc --go_out=paths=source_relative:. path/to/file.proto +// +// This generates Go bindings for the protocol buffer defined by file.proto. +// With that input, the output will be written to: +// path/to/file.pb.go +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/golang/protobuf/internal/gengogrpc" + gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo" + "google.golang.org/protobuf/compiler/protogen" +) + +func main() { + var ( + flags flag.FlagSet + plugins = flags.String("plugins", "", "list of plugins to enable (supported values: grpc)") + importPrefix = flags.String("import_prefix", "", "prefix to prepend to import paths") + ) + importRewriteFunc := func(importPath protogen.GoImportPath) protogen.GoImportPath { + switch importPath { + case "context", "fmt", "math": + return importPath + } + if *importPrefix != "" { + return protogen.GoImportPath(*importPrefix) + importPath + } + return importPath + } + protogen.Options{ + ParamFunc: flags.Set, + ImportRewriteFunc: importRewriteFunc, + }.Run(func(gen *protogen.Plugin) error { + grpc := false + for _, plugin := range strings.Split(*plugins, ",") { + switch plugin { + case "grpc": + grpc = true + case "": + default: + return fmt.Errorf("protoc-gen-go: unknown plugin %q", plugin) + } + } + for _, f := range gen.Files { + if !f.Generate { + continue + } + g := gengo.GenerateFile(gen, f) + if grpc { + gengogrpc.GenerateFileContent(gen, f, g) + } + } + gen.SupportedFeatures = gengo.SupportedFeatures + return nil + }) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 70276e8f..e729dcff 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -1,141 +1,165 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - import ( "fmt" - "reflect" "strings" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" ) -const googleApis = "type.googleapis.com/" +const urlPrefix = "type.googleapis.com/" -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { if any == nil { return "", fmt.Errorf("message is nil") } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } - return any.TypeUrl[slash+1:], nil + return name, nil } -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) +// MarshalAny marshals the given message m into an anypb.Any message. 
+func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) if err != nil { return nil, err } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil } -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) if err != nil { return nil, err } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err } - return reflect.New(t.Elem()).Interface().(proto.Message), nil + return proto.MessageV1(mt.New().Interface()), nil } -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. // -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { var err error - d.Message, err = Empty(any) + dm.Message, err = Empty(any) if err != nil { return err } } - return UnmarshalAny(any, d.Message) + m = dm.Message } - aname, err := AnyMessageName(any) + anyName, err := AnyMessageName(any) if err != nil { return err } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) } - return proto.Unmarshal(any.Value, pb) + return proto.Unmarshal(any.Value, m) } -// Is returns true if any value contains a given message type. 
-func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { +// Is reports whether the Any message contains a message of the specified type. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { return false } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 7b0ad1ad..0ef27d33 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,203 +1,62 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto +// source: github.com/golang/protobuf/ptypes/any/any.proto package any import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/any.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Any = anypb.Any -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. This string must contain at least - // one "/" character. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) 
- // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} -} - -func (*Any) XXX_WellKnownType() string { return "Any" } +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { - proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) -} - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index c9be8541..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,155 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. This string must contain at least - // one "/" character. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). 
- // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index c0d595da..fb9edd5c 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -1,35 +1,6 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. -/* -Package ptypes contains code for interacting with well-known types. -*/ +// Package ptypes provides functionality for interacting with well-known types. 
package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 26d1ca2f..6110ae8a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -1,102 +1,72 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - import ( "errors" "fmt" "time" - durpb "github.com/golang/protobuf/ptypes/duration" + durationpb "github.com/golang/protobuf/ptypes/duration" ) +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. 
- if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { return 0, err } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } } return d, nil } -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, + return &durationpb.Duration{ + Seconds: int64(secs), Nanos: int32(nanos), } } + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 58b07869..d0079ee3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,163 +1,63 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google/protobuf/duration.proto +// source: github.com/golang/protobuf/ptypes/duration/duration.proto package duration import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/duration.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Duration = durationpb.Duration -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. 
Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} -} - -func (*Duration) XXX_WellKnownType() string { return "Duration" } +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { - 
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) -} - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 99cb102c..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. 
Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go index 6bd9f674..16686a65 100644 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -1,85 +1,62 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/empty.proto +// source: github.com/golang/protobuf/ptypes/empty/empty.proto package empty import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. 
-type Empty struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_900544acb223d5b8, []int{0} -} - -func (*Empty) XXX_WellKnownType() string { return "Empty" } - -func (m *Empty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Empty.Unmarshal(m, b) -} -func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Empty.Marshal(b, m, deterministic) -} -func (m *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(m, src) -} -func (m *Empty) XXX_Size() int { - return xxx_messageInfo_Empty.Size(m) -} -func (m *Empty) XXX_DiscardUnknown() { - xxx_messageInfo_Empty.DiscardUnknown(m) -} - -var xxx_messageInfo_Empty proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") -} - -func init() { - proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) -} - -var fileDescriptor_900544acb223d5b8 = []byte{ - // 148 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, - 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, - 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, - 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, - 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, - 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, - 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, - 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, - 0xb7, 0x00, 0x00, 0x00, +// Symbols defined in public import of google/protobuf/empty.proto. 
+ +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto deleted file mode 100644 index 03cacd23..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto +++ /dev/null @@ -1,52 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/empty"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "EmptyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. -message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 8da0df01..026d0d49 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -1,46 +1,18 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements operations on google.protobuf.Timestamp. - import ( "errors" "fmt" "time" - tspb "github.com/golang/protobuf/ptypes/timestamp" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" ) +// Range of google.protobuf.Duration as specified in timestamp.proto. const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). @@ -50,44 +22,18 @@ const ( maxValidSeconds = 253402300800 ) -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// Timestamp converts a timestamppb.Timestamp to a time.Time. // It returns an error if the argument is invalid. // -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time @@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. 
-func TimestampNow() *tspb.Timestamp {
+func TimestampNow() *timestamppb.Timestamp {
 	ts, err := TimestampProto(time.Now())
 	if err != nil {
 		panic("ptypes: time.Now() out of Timestamp range")
@@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp {
 
 // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
 // It returns an error if the resulting Timestamp is invalid.
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
-	ts := &tspb.Timestamp{
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
 		Seconds: t.Unix(),
 		Nanos:   int32(t.Nanosecond()),
 	}
@@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
 	return ts, nil
 }
 
-// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
-// Timestamps, it returns an error message in parentheses.
-func TimestampString(ts *tspb.Timestamp) string {
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
 	t, err := Timestamp(ts)
 	if err != nil {
 		return fmt.Sprintf("(%v)", err)
 	}
 	return t.Format(time.RFC3339Nano)
 }
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 7a3b1e40..a76f8076 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,185 +1,64 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
 
 package timestamp
 
 import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/timestamp.proto.
 
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Timestamp = timestamppb.Timestamp -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard -// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. 
In Python, a standard `datetime.datetime` object can be converted -// to this format using -// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with -// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use -// the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} -} - -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = 
[]int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { - proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) -} - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index cd357864..00000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,138 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
-// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard -// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using -// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with -// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use -// the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 2133562b..66561868 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -6,6 +6,10 @@ // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // @@ -22,8 +26,8 @@ // equality is determined by recursively comparing the primitive kinds on both // values, much like reflect.DeepEqual. 
Unlike reflect.DeepEqual, unexported // fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared -// using the AllowUnexported option. +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option. package cmp import ( @@ -62,8 +66,8 @@ import ( // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported -// option explicitly permits comparing the unexported field. +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. @@ -80,7 +84,58 @@ import ( // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively // calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from y, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. 
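For context on the reworked Diff entry point in the hunk above, here is a minimal usage sketch of how Diff is typically called from a test. It is illustrative only and not part of the vendored patch; the config type and its fields are invented for the example.

package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// config is an invented type used only to demonstrate cmp.Diff.
type config struct {
	Name  string
	Ports []int
}

func TestConfigDiff(t *testing.T) {
	want := config{Name: "alks", Ports: []int{80, 443}}
	got := config{Name: "alks", Ports: []int{80, 8443}}

	// Diff returns "" if and only if Equal would report the values as equal,
	// so the empty-string check is the usual test idiom.
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("config mismatch (-want +got):\n%s", diff)
	}
}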
+func rootStep(x, y interface{}) PathStep { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) @@ -103,33 +158,7 @@ func Equal(x, y interface{}, opts ...Option) bool { t = vx.Type() } - s := newState(opts) - s.compareAny(&pathStep{t, vx, vy}) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added to y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. -func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - eq := Equal(x, y, Options(opts), Reporter(r)) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d + return &pathStep{t, vx, vy} } type state struct { @@ -137,6 +166,7 @@ type state struct { // Calling statelessCompare must not result in observable changes to these. result diff.Result // The current result of comparison curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers reporters []reporter // Optional reporters // recChecker checks for infinite cycles applying the same set of @@ -148,13 +178,14 @@ type state struct { dynChecker dynChecker // These fields, once set by processOption, will not change. - exporters map[reflect.Type]bool // Set of structs with unexported field visibility - opts Options // List of all fundamental and filter options + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options } func newState(opts []Option) *state { // Always ensure a validator option exists to validate the inputs. s := &state{opts: Options{validator{}}} + s.curPtrs.Init() s.processOption(Options(opts)) return s } @@ -174,13 +205,8 @@ func (s *state) processOption(opt Option) { panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) } s.opts = append(s.opts, opt) - case visibleStructs: - if s.exporters == nil { - s.exporters = make(map[reflect.Type]bool) - } - for t := range opt { - s.exporters[t] = true - } + case exporter: + s.exporters = append(s.exporters, opt) case reporter: s.reporters = append(s.reporters, opt) default: @@ -192,9 +218,9 @@ func (s *state) processOption(opt Option) { // This function is stateless in that it does not alter the current result, // or output to any registered reporters. func (s *state) statelessCompare(step PathStep) diff.Result { - // We do not save and restore the curPath because all of the compareX - // methods should properly push and pop from the path. - // It is an implementation bug if the contents of curPath differs from + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. 
+ // It is an implementation bug if the contents of the paths differ from // when calling this function to when returning from it. oldResult, oldReporters := s.result, s.reporters @@ -216,9 +242,17 @@ func (s *state) compareAny(step PathStep) { } s.recChecker.Check(s.curPath) - // Obtain the current type and values. + // Cycle-detection for slice elements (see NOTE in compareSlice). t := step.Type() vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } // Rule 1: Check whether an option applies on this node in the value tree. if s.tryOptions(t, vx, vy) { @@ -342,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). if !flags.AtLeastGo110 { if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { return reflect.New(t).Elem() @@ -352,8 +386,10 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { } func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy + var mayForce, mayForceInit bool step := StructField{&structField{}} for i := 0; i < t.NumField(); i++ { step.typ = t.Field(i).Type @@ -372,10 +408,18 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } - step.mayForce = s.exporters[t] + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) @@ -391,9 +435,21 @@ func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. 
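To illustrate what the cycle-detection comments above are guarding against, here is a small sketch (not part of the patch; the node type is invented) of a self-referential value. With the pointer, map, and per-element slice tracking added in this version, such comparisons terminate, and two cycles compare as equal only when both sides reach the cycle at the same step.

package example

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// node is an invented type containing a pointer cycle.
type node struct {
	Value int
	Next  *node
}

func main() {
	// Two single-node rings: each Next pointer refers back to its own node.
	a := &node{Value: 1}
	a.Next = a
	b := &node{Value: 1}
	b.Next = b

	// The cycle is detected when the same pointer pair is pushed twice,
	// so the walk stops instead of recursing without bound.
	fmt.Println(cmp.Equal(a, b)) // true
}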
- step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} withIndexes := func(ix, iy int) SliceIndex { if ix >= 0 { step.vx, step.xkey = vx.Index(ix), ix @@ -470,7 +526,12 @@ func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. @@ -507,7 +568,12 @@ func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for pointers. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) vx, vy = vx.Elem(), vy.Elem() s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index abc3a1c3..dfa5d213 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -8,8 +8,8 @@ package cmp import "reflect" -const supportAllowUnexported = false +const supportExporters = false -func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { - panic("retrieveUnexportedField is not implemented") +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { + panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 59d4ee91..351f1a34 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -11,13 +11,25 @@ import ( "unsafe" ) -const supportAllowUnexported = true +const supportExporters = true // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. 
+ if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 3d2e4266..730e223e 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -12,6 +12,13 @@ // is more important than obtaining a minimal Levenshtein distance. package diff +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + // EditType represents a single operation within an edit-script. type EditType uint8 @@ -112,6 +119,8 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } +var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) + // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // @@ -159,6 +168,17 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. + // To ensure flexibility in changing the algorithm in the future, + // introduce some degree of deliberate instability. + // This is achieved by fiddling the zigzag iterator to start searching + // the graph starting from the bottom-right versus than the top-left. + // The result may differ depending on the starting search location, + // but still produces a valid edit script. + zigzagInit := randInt // either 0 or 1 + if flags.Deterministic { + zigzagInit = 0 + } + // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -209,7 +229,7 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { break } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 00000000..8228e7d5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) 
+ } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 0a01c479..e9e384a1 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer { // assumes that the GC implementation does not use a moving collector. 
return Pointer{v.Pointer(), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index da134ae2..b50c17ec 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer { // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 79344816..4b0407a7 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -225,8 +225,23 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) } panic("not reachable") @@ -360,9 +375,8 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// AllowUnexported returns an Option that forcibly allows operations on -// unexported fields in certain structs, which are specified by passing in a -// value of each struct type. +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal @@ -386,10 +400,24 @@ func (cm comparer) String() string { // // In other cases, the cmpopts.IgnoreUnexported option can be used to ignore // all unexported fields on specified struct types. 
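As a usage sketch of the Exporter option documented above (illustrative only; the secret type is invented), the predicate decides per struct type whether unexported fields may be read, and the pre-existing AllowUnexported option remains available as a wrapper around it:

package example

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

// secret is an invented type whose only field is unexported, so cmp
// panics on it unless an option grants visibility.
type secret struct {
	token string
}

func main() {
	x := secret{token: "abc"}
	y := secret{token: "abc"}

	// Exporter receives the struct type and reports whether cmp may
	// forcibly read its unexported fields.
	allowSecret := cmp.Exporter(func(t reflect.Type) bool {
		return t == reflect.TypeOf(secret{})
	})
	fmt.Println(cmp.Equal(x, y, allowSecret)) // true

	// AllowUnexported is kept for compatibility and is equivalent here.
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(secret{}))) // true
}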
-func AllowUnexported(types ...interface{}) Option { - if !supportAllowUnexported { - panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. +func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { t := reflect.TypeOf(typ) @@ -398,13 +426,7 @@ func AllowUnexported(types ...interface{}) Option { } m[t] = true } - return visibleStructs(m) -} - -type visibleStructs map[reflect.Type]bool - -func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") + return exporter(func(t reflect.Type) bool { return m[t] }) } // Result represents the comparison result for a single node and @@ -436,6 +458,11 @@ func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + type resultFlags uint const ( @@ -446,6 +473,7 @@ const ( reportByIgnore reportByMethod reportByFunc + reportByCycle ) // Reporter is an Option that can be passed to Equal. When Equal traverses diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 96fffd29..603dbb00 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -10,6 +10,8 @@ import ( "strings" "unicode" "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" ) // Path is a list of PathSteps describing the sequence of operations to get @@ -41,7 +43,7 @@ type PathStep interface { // In some cases, one or both may be invalid or have restrictions: // • For StructField, both are not interface-able if the current field // is unexported and the struct type is not explicitly permitted by - // AllowUnexported to traverse unexported fields. + // an Exporter to traverse unexported fields. // • For SliceIndex, one may be invalid if an element is missing from // either the x or y slice. // • For MapIndex, one may be invalid if an entry is missing from @@ -175,7 +177,8 @@ type structField struct { // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressible) field reflect.StructField // Field information } @@ -187,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) { // Forcibly obtain read-write access to an unexported struct field. 
if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field) - vy = retrieveUnexportedField(sf.pvy, sf.field) + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false @@ -207,6 +210,7 @@ type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep xkey, ykey int + isSlice bool // False for reflect.Array } func (si SliceIndex) Type() reflect.Type { return si.typ } @@ -301,6 +305,72 @@ func (tf Transform) Func() reflect.Value { return tf.trans.fnc } // The == operator can be used to detect the exact option used. func (tf Transform) Option() Option { return tf.trans } +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a seperate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. 
+func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + // isExported reports whether the identifier is exported. func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index 6ddf2999..aafcb363 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -41,7 +41,10 @@ func (r *defaultReporter) String() string { if r.root.NumDiff == 0 { return "" } - return formatOptions{}.FormatDiff(r.root).String() + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() } func assert(ok bool) { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eed..9e218096 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -11,14 +11,6 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// TODO: Enforce limits? -// * Enforce maximum number of records to print per node? -// * Enforce maximum size in bytes allowed? -// * As a heuristic, use less verbosity for equal nodes than unequal nodes. -// TODO: Enforce unique outputs? -// * Avoid Stringer methods if it results in same output? -// * Print pointer address if outputs still equal? - // numContextRecords is the number of surrounding equal records to print. const numContextRecords = 2 @@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 3 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode) textNode { +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else { + opts = opts.WithVerbosity(3) + } + // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + // For leaf nodes, format the value based on the reflect.Values alone. if v.MaxDepth == 0 { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + // Descend into the child value node. if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value) - out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) case reflect.Ptr: - return textWrap{"&", opts.FormatDiff(v.Value), ""} + // Register pointer to support cycle detection. 
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: - return opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } + return out } } -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. var name string var formatKey func(reflect.Value) string @@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) - formatKey = formatMapKey + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- } // Handle unification. @@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool @@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te } continue } - if out := opts.FormatDiff(r.Value); out != nil { + if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. + var numDiffs int var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. @@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te // Format the equal values. 
for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } recs = recs[numEqual:] continue @@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) } default: - out := opts.FormatDiff(r.Value) + out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } - assert(len(recs) == 0) - return textWrap{"{", list, "}"} + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 00000000..d620c2c2 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b628..786f6712 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -5,13 +5,14 @@ package cmp import ( + "bytes" "fmt" "reflect" "strconv" "strings" "unicode" + "unicode/utf8" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) @@ -20,14 +21,22 @@ type formatValueOptions struct { // methods like error.Error or fmt.Stringer.String. AvoidStringer bool - // ShallowPointers controls whether to avoid descending into pointers. - // Useful when printing map keys, where pointer comparison is performed - // on the pointer address rather than the pointed-at value. - ShallowPointers bool - // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. 
+ LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. @@ -44,12 +53,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { default: return s } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } case elideType: return s } // Determine the type label, applying special handling for unnamed types. - typeName := t.String() + typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). @@ -57,39 +69,77 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } - typeName = strings.Replace(typeName, "struct {", "struct{", -1) - typeName = strings.Replace(typeName, "interface {", "interface{", -1) } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } - // Avoid wrap the value in parenthesis if unnecessary. - if s, ok := s.(textWrap); ok { - hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") - hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { - return textWrap{typeName, s, ""} + return s } } - return textWrap{typeName + "(", s, ")"} + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in m. As pointers are visited, m is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. 
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - switch v := v.Interface().(type) { - case error: - return textLine("e" + formatString(v.Error())) - case fmt.Stringer: - return textLine("s" + formatString(v.String())) + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. + defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) } } } @@ -102,94 +152,140 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t } }() - var ptr string switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: - return textLine(formatString(v.String())) + return opts.formatString("", v.String()) case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(v)) + return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) if value.IsZero(vv) { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) - list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check whether this is a []byte of text data. + if t.Elem() == reflect.TypeOf(byte(0)) { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + return opts.WithTypeMode(emitType).FormatType(t, out) + } } + fallthrough case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
+ opts.VerbosityLevel-- + } var list textList for i := 0; i < v.Len(); i++ { - vi := v.Index(i) - if vi.CanAddr() { // Check for cyclic elements - p := vi.Addr() - if m.Visit(p) { - var out textNode - out = textLine(formatPointer(p)) - out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) - out = textWrap{"*", out, ""} - list = append(list, textRecord{Value: out}) - continue - } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out case reflect.Map: if v.IsNil() { return textNil } - if m.Visit(v) { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) } + defer ptrs.Pop() + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for _, k := range value.SortKeys(v.MapKeys()) { - sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } - if opts.PrintAddresses { - ptr = formatPointer(v) - } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out case reflect.Ptr: if v.IsNil() { return textNil } - if m.Visit(v) || opts.ShallowPointers { - return textLine(formatPointer(v)) - } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} } + defer ptrs.Pop() + skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out case reflect.Interface: if v.IsNil() { return textNil @@ -197,19 +293,65 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } } +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... 
+ } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. -func formatMapKey(v reflect.Value) string { +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions + opts.DiffMode = diffIdentical opts.TypeMode = elideType - opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } @@ -227,7 +369,7 @@ func formatString(s string) string { rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } - if strings.IndexFunc(s, rawInvalid) < 0 { + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs @@ -256,23 +398,3 @@ func formatHex(u uint64) string { } return fmt.Sprintf(f, u) } - -// formatPointer prints the address of the pointer. -func formatPointer(v reflect.Value) string { - p := v.Pointer() - if flags.Deterministic { - p = 0xdeadf00f // Only used for stable testing purposes - } - return fmt.Sprintf("⟪0x%x⟫", p) -} - -type visitedPointers map[value.Pointer]struct{} - -// Visit inserts pointer v into the visited map and reports whether it had -// already been visited before. 
-func (m visitedPointers) Visit(v reflect.Value) bool { - p := value.PointerOf(v) - _, visited := m[p] - m[p] = struct{}{} - return visited -} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4..35315dad 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strconv" "strings" "unicode" "unicode/utf8" @@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected - case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: - // TODO: Handle the case where someone uses bytes.Equal on a large slice. - return false // Some custom option was used to determined equality case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid + case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): + return false // Both slice values have to be non-empty + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false } switch t := v.Type; t.Kind() { @@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } if isText || isBinary { var numLines, lastLineIdx, maxLineLen int - isBinary = false + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) for i, r := range sx + sy { if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { isBinary = true @@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } } isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 } // Format the string into printable records. @@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... 
// 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. @@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "" + // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. @@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) + // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. 
@@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) @@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Wrap the output with appropriate type information. - var out textNode = textWrap{"{", list, "}"} + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if !isText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). @@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } switch t.Kind() { case reflect.String: - out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf(string("")) { out = opts.FormatType(t, out) } case reflect.Slice: - out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf([]byte(nil)) { out = opts.FormatType(t, out) } @@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice( return n0 - v.Len() } + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. @@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice( } // Print unequal. 
+ len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) } - assert(vx.Len() == 0 && vy.Len() == 0) return list } diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b8fcab7..8b12c05c 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -10,12 +10,15 @@ import ( "math/rand" "strings" "time" + "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 +const maxColumnLength = 80 + type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { @@ -91,21 +94,22 @@ type textNode interface { // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting } -func (s textWrap) Len() int { +func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } -func (s1 textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(textWrap); ok { +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } -func (s textWrap) String() string { +func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) @@ -114,7 +118,7 @@ func (s textWrap) String() string { b = append(b, '\n') // Trailing newline return string(b) } -func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) @@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } return b, s } -func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) @@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { // of the textList.formatCompactTo method. type textList []textRecord type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - Comment fmt.Stringer // e.g., "6 identical fields" + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. 
func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := ds != diffStats{} + hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { - *s = append(*s, textRecord{Value: textEllipsis}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } @@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool { } func (s textList) String() string { - return textWrap{"{", s, "}"}.String() + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { @@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { @@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, - func(r textRecord) int { return len(r.Key) }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, - func(r textRecord) int { return len(r.Value.(textLine)) }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + // Format the list as a multi-lined output. n++ for i, r := range s { @@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.Value.Equal(textEllipsis) { + if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') @@ -332,6 +371,11 @@ type diffStats struct { NumModified int } +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... 
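The go-cmp changes vendored above replace the old `visitedPointers` map with the `pointerReferences` stack and `resolveReferences`, so values that refer back to themselves are rendered with numbered back-references instead of raw addresses or unbounded recursion. As a rough sketch of how this surfaces through the library's public API (not part of the vendored diff; it assumes a go-cmp release with cycle support, v0.4.0 or newer, and a made-up `node` type), comparing two cyclic values might look like:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// node is a hypothetical self-referential type used only for this sketch.
type node struct {
	Value int
	Next  *node
}

func main() {
	// Two one-element cyclic lists that differ only in Value.
	x := &node{Value: 1}
	x.Next = x
	y := &node{Value: 2}
	y.Next = y

	// A cycle-aware go-cmp terminates the walk at the revisited pointer and
	// refers back to it with a reference ID (e.g. "ref#1") rather than
	// recursing forever or printing a bare address.
	fmt.Println(cmp.Diff(x, y))
}
```

The exact rendering depends on the go-cmp version, but the `Value` mismatch is reported once and the cycle collapses into a back-reference.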
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f..00000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6..00000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268..00000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11f..00000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
- -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d..00000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9a..00000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. 
-package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79..00000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b1746163..00000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6c..00000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
-func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b0..00000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78edc..00000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
-func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddb..00000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54d..00000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cd..00000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. 
-type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c737..00000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404cc..00000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
-func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
-func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac6..00000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 84af91c9..00000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/go-checkpoint/LICENSE similarity index 100% rename from vendor/github.com/hashicorp/hcl/LICENSE rename to vendor/github.com/hashicorp/go-checkpoint/LICENSE diff --git a/vendor/github.com/hashicorp/go-checkpoint/README.md b/vendor/github.com/hashicorp/go-checkpoint/README.md new file mode 100644 index 00000000..e717b6ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/README.md @@ -0,0 +1,22 @@ +# Go Checkpoint Client + +[Checkpoint](http://checkpoint.hashicorp.com) is an internal service at +Hashicorp that we use to check version information, broadcast security +bulletins, etc. + +We understand that software making remote calls over the internet +for any reason can be undesirable. Because of this, Checkpoint can be +disabled in all of our software that includes it. You can view the source +of this client to see that we're not sending any private information. + +Each Hashicorp application has it's specific configuration option +to disable checkpoint calls, but the `CHECKPOINT_DISABLE` makes +the underlying checkpoint component itself disabled. For example +in the case of packer: +``` +CHECKPOINT_DISABLE=1 packer build +``` + +**Note:** This repository is probably useless outside of internal HashiCorp +use. It is open source for disclosure and because our open source projects +must be able to link to it. diff --git a/vendor/github.com/hashicorp/go-checkpoint/check.go b/vendor/github.com/hashicorp/go-checkpoint/check.go new file mode 100644 index 00000000..109d0d35 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/check.go @@ -0,0 +1,368 @@ +package checkpoint + +import ( + crand "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB} + +// CheckParams are the parameters for configuring a check request. +type CheckParams struct { + // Product and version are used to lookup the correct product and + // alerts for the proper version. The version is also used to perform + // a version check. + Product string + Version string + + // Arch and OS are used to filter alerts potentially only to things + // affecting a specific os/arch combination. If these aren't specified, + // they'll be automatically filled in. 
+ Arch string + OS string + + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. + Signature string + SignatureFile string + + // CacheFile, if specified, will cache the result of a check. The + // duration of the cache is specified by CacheDuration, and defaults + // to 48 hours if not specified. If the CacheFile is newer than the + // CacheDuration, than the Check will short-circuit and use those + // results. + // + // If the CacheFile directory doesn't exist, it will be created with + // permissions 0755. + CacheFile string + CacheDuration time.Duration + + // Force, if true, will force the check even if CHECKPOINT_DISABLE + // is set. Within HashiCorp products, this is ONLY USED when the user + // specifically requests it. This is never automatically done without + // the user's consent. + Force bool +} + +// CheckResponse is the response for a check request. +type CheckResponse struct { + Product string `json:"product"` + CurrentVersion string `json:"current_version"` + CurrentReleaseDate int `json:"current_release_date"` + CurrentDownloadURL string `json:"current_download_url"` + CurrentChangelogURL string `json:"current_changelog_url"` + ProjectWebsite string `json:"project_website"` + Outdated bool `json:"outdated"` + Alerts []*CheckAlert `json:"alerts"` +} + +// CheckAlert is a single alert message from a check request. +// +// These never have to be manually constructed, and are typically populated +// into a CheckResponse as a result of the Check request. +type CheckAlert struct { + ID int `json:"id"` + Date int `json:"date"` + Message string `json:"message"` + URL string `json:"url"` + Level string `json:"level"` +} + +// Check checks for alerts and new version information. +func Check(p *CheckParams) (*CheckResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &CheckResponse{}, nil + } + + // Set a default timeout of 3 sec for the check request (in milliseconds) + timeout := 3000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + // If we have a cached result, then use that + if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil { + return nil, err + } else if r != nil { + defer r.Close() + return checkResult(r) + } + + var u url.URL + + if p.Arch == "" { + p.Arch = runtime.GOARCH + } + if p.OS == "" { + p.OS = runtime.GOOS + } + + // If we're given a SignatureFile, then attempt to read that. 
+ signature := p.Signature + if p.Signature == "" && p.SignatureFile != "" { + var err error + signature, err = checkSignature(p.SignatureFile) + if err != nil { + return nil, err + } + } + + v := u.Query() + v.Set("version", p.Version) + v.Set("arch", p.Arch) + v.Set("os", p.OS) + v.Set("signature", signature) + + u.Scheme = "https" + u.Host = "checkpoint-api.hashicorp.com" + u.Path = fmt.Sprintf("/v1/check/%s", p.Product) + u.RawQuery = v.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + var r io.Reader = resp.Body + if p.CacheFile != "" { + // Make sure the directory holding our cache exists. + if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil { + return nil, err + } + + // We have to cache the result, so write the response to the + // file as we read it. + f, err := os.Create(p.CacheFile) + if err != nil { + return nil, err + } + + // Write the cache header + if err := writeCacheHeader(f, p.Version); err != nil { + f.Close() + os.Remove(p.CacheFile) + return nil, err + } + + defer f.Close() + r = io.TeeReader(r, f) + } + + return checkResult(r) +} + +// CheckInterval is used to check for a response on a given interval duration. +// The interval is not exact, and checks are randomized to prevent a thundering +// herd. However, it is expected that on average one check is performed per +// interval. The returned channel may be closed to stop background checks. +func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} { + doneCh := make(chan struct{}) + + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return doneCh + } + + go func() { + for { + select { + case <-time.After(randomStagger(interval)): + resp, err := Check(p) + cb(resp, err) + case <-doneCh: + return + } + } + }() + + return doneCh +} + +// randomStagger returns an interval that is between 3/4 and 5/4 of +// the given interval. The expected value is the interval. +func randomStagger(interval time.Duration) time.Duration { + stagger := time.Duration(mrand.Int63()) % (interval / 2) + return 3*(interval/4) + stagger +} + +func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) { + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + // File doesn't exist, not a problem + return nil, nil + } + + return nil, err + } + + if d == 0 { + d = 48 * time.Hour + } + + if fi.ModTime().Add(d).Before(time.Now()) { + // Cache is busted, delete the old file and re-request. We ignore + // errors here because re-creating the file is fine too. + os.Remove(path) + return nil, nil + } + + // File looks good so far, open it up so we can inspect the contents. 
+ f, err := os.Open(path) + if err != nil { + return nil, err + } + + // Check the signature of the file + var sig [4]byte + if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil { + f.Close() + return nil, err + } + if !reflect.DeepEqual(sig, magicBytes) { + // Signatures don't match. Reset. + f.Close() + return nil, nil + } + + // Check the version. If it changed, then rewrite + var length uint32 + if err := binary.Read(f, binary.LittleEndian, &length); err != nil { + f.Close() + return nil, err + } + data := make([]byte, length) + if _, err := io.ReadFull(f, data); err != nil { + f.Close() + return nil, err + } + if string(data) != current { + // Version changed, reset + f.Close() + return nil, nil + } + + return f, nil +} +func checkResult(r io.Reader) (*CheckResponse, error) { + var result CheckResponse + if err := json.NewDecoder(r).Decode(&result); err != nil { + return nil, err + } + return &result, nil +} + +func checkSignature(path string) (string, error) { + _, err := os.Stat(path) + if err == nil { + // The file exists, read it out + sigBytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + + // Split the file into lines + lines := strings.SplitN(string(sigBytes), "\n", 2) + if len(lines) > 0 { + return strings.TrimSpace(lines[0]), nil + } + } + + // If this isn't a non-exist error, then return that. + if !os.IsNotExist(err) { + return "", err + } + + // The file doesn't exist, so create a signature. + var b [16]byte + n := 0 + for n < 16 { + n2, err := crand.Read(b[n:]) + if err != nil { + return "", err + } + + n += n2 + } + signature := fmt.Sprintf( + "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + + // Make sure the directory holding our signature exists. + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return "", err + } + + // Write the signature + if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil { + return "", err + } + + return signature, nil +} + +func writeCacheHeader(f io.Writer, v string) error { + // Write our signature first + if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil { + return err + } + + // Write out our current version length + length := uint32(len(v)) + if err := binary.Write(f, binary.LittleEndian, length); err != nil { + return err + } + + _, err := f.Write([]byte(v)) + return err +} + +// userMessage is suffixed to the signature file to provide feedback. +var userMessage = ` +This signature is a randomly generated UUID used to de-duplicate +alerts and version information. This signature is random, it is +not based on any personally identifiable information. To create +a new signature, you can simply delete this file at any time. +See the documentation for the software using Checkpoint for more +information on how to disable it. 
+` diff --git a/vendor/github.com/hashicorp/go-checkpoint/go.mod b/vendor/github.com/hashicorp/go-checkpoint/go.mod new file mode 100644 index 00000000..be0c793e --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/go-checkpoint + +require ( + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-uuid v1.0.0 +) diff --git a/vendor/github.com/hashicorp/go-checkpoint/go.sum b/vendor/github.com/hashicorp/go-checkpoint/go.sum new file mode 100644 index 00000000..2128a0c8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= diff --git a/vendor/github.com/hashicorp/go-checkpoint/telemetry.go b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go new file mode 100644 index 00000000..b9ee6298 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go @@ -0,0 +1,118 @@ +package checkpoint + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "runtime" + "time" + + "github.com/hashicorp/go-cleanhttp" + uuid "github.com/hashicorp/go-uuid" +) + +// ReportParams are the parameters for configuring a telemetry report. +type ReportParams struct { + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. 
+ Signature string `json:"signature"` + SignatureFile string `json:"-"` + + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + Arch string `json:"arch"` + OS string `json:"os"` + Payload interface{} `json:"payload,omitempty"` + Product string `json:"product"` + RunID string `json:"run_id"` + SchemaVersion string `json:"schema_version"` + Version string `json:"version"` +} + +func (i *ReportParams) signature() string { + signature := i.Signature + if i.Signature == "" && i.SignatureFile != "" { + var err error + signature, err = checkSignature(i.SignatureFile) + if err != nil { + return "" + } + } + return signature +} + +// Report sends telemetry information to checkpoint +func Report(ctx context.Context, r *ReportParams) error { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return nil + } + + req, err := ReportRequest(r) + if err != nil { + return err + } + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + if resp.StatusCode != 201 { + return fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + return nil +} + +// ReportRequest creates a request object for making a report +func ReportRequest(r *ReportParams) (*http.Request, error) { + // Populate some fields automatically if we can + if r.RunID == "" { + uuid, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + r.RunID = uuid + } + if r.Arch == "" { + r.Arch = runtime.GOARCH + } + if r.OS == "" { + r.OS = runtime.GOOS + } + if r.Signature == "" { + r.Signature = r.signature() + } + + b, err := json.Marshal(r) + if err != nil { + return nil, err + } + + u := &url.URL{ + Scheme: "https", + Host: "checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/telemetry/%s", r.Product), + } + + req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + return req, nil +} diff --git a/vendor/github.com/hashicorp/go-checkpoint/versions.go b/vendor/github.com/hashicorp/go-checkpoint/versions.go new file mode 100644 index 00000000..a5b0d3b3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/versions.go @@ -0,0 +1,90 @@ +package checkpoint + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +// VersionsParams are the parameters for a versions request. +type VersionsParams struct { + // Service is used to lookup the correct service. + Service string + + // Product is used to filter the version contraints. + Product string + + // Force, if true, will force the check even if CHECKPOINT_DISABLE + // is set. Within HashiCorp products, this is ONLY USED when the user + // specifically requests it. This is never automatically done without + // the user's consent. + Force bool +} + +// VersionsResponse is the response for a versions request. +type VersionsResponse struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// Versions returns the version constrains for a given service and product. 
+func Versions(p *VersionsParams) (*VersionsResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &VersionsResponse{}, nil + } + + // Set a default timeout of 1 sec for the versions request (in milliseconds) + timeout := 1000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + v := url.Values{} + v.Set("product", p.Product) + + u := &url.URL{ + Scheme: "https", + Host: "checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/versions/%s", p.Service), + RawQuery: v.Encode(), + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + result := &VersionsResponse{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/go-cty/LICENSE b/vendor/github.com/hashicorp/go-cty/LICENSE new file mode 100644 index 00000000..d6503b55 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
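The go-checkpoint files added above are pulled in by the v2 plugin SDK for version and alert checks. Purely as an illustrative sketch (not part of this changeset), a caller could exercise the `Check` API exactly as it is declared in the vendored source above; the product name used here is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	// Check queries checkpoint-api.hashicorp.com for the latest known
	// release of a product. Setting CHECKPOINT_DISABLE in the environment
	// short-circuits the request, as shown in the vendored Check function.
	resp, err := checkpoint.Check(&checkpoint.CheckParams{
		Product: "terraform-provider-alks", // hypothetical product name
		Version: "2.0.0",
	})
	if err != nil {
		log.Fatal(err)
	}

	// CheckResponse carries the current version and an outdated flag.
	if resp.Outdated {
		fmt.Printf("a newer version is available: %s\n", resp.CurrentVersion)
	}
}
```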
diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule.go b/vendor/github.com/hashicorp/go-cty/cty/capsule.go new file mode 100644 index 00000000..2fdc15ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule.go @@ -0,0 +1,128 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type + Ops *CapsuleOps +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + impl := t.Ops.TypeGoString + if impl == nil { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + if t.Ops == noCapsuleOps { + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) + } else { + // Including the operations in the output will make this _very_ long, + // so in practice any capsule type with ops ought to provide a + // TypeGoString function to override this with something more + // reasonable. + return fmt.Sprintf("cty.CapsuleWithOps(%q, reflect.TypeOf(%#v), %#v)", t.Name, victimVal.Interface(), t.Ops) + } + } + return impl(t.GoType) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value by default supports no operations except +// equality, and equality is implemented by pointer identity of the +// encapsulated pointer. A capsule type can optionally have its own +// implementations of certain operations if it is created with CapsuleWithOps +// instead of Capsule. +// +// The given name is used as the new type's "friendly name". This can be any +// string in principle, but will usually be a short, all-lowercase name aimed +// at users of the embedding language (i.e. not mention Go-specific details) +// and will ideally not create ambiguity with any predefined cty type. +// +// Capsule types are never introduced by any standard cty operation, so a +// calling application opts in to including them within its own type system +// by creating them and introducing them via its own functions. At that point, +// the application is responsible for dealing with any capsule-typed values +// that might be returned. +func Capsule(name string, nativeType reflect.Type) Type { + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: noCapsuleOps, + }, + } +} + +// CapsuleWithOps is like Capsule except the caller may provide an object +// representing some overloaded operation implementations to associate with +// the given capsule type. 
+// +// All of the other caveats and restrictions for capsule types still apply, but +// overloaded operations can potentially help a capsule type participate better +// in cty operations. +func CapsuleWithOps(name string, nativeType reflect.Type, ops *CapsuleOps) Type { + // Copy the operations to make sure the caller can't modify them after + // we're constructed. + ourOps := *ops + ourOps.assertValid() + + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: &ourOps, + }, + } +} + +// IsCapsuleType returns true if this type is a capsule type, as created +// by cty.Capsule . +func (t Type) IsCapsuleType() bool { + _, ok := t.typeImpl.(*capsuleType) + return ok +} + +// EncapsulatedType returns the encapsulated native type of a capsule type, +// or panics if the receiver is not a Capsule type. +// +// Is IsCapsuleType to determine if this method is safe to call. +func (t Type) EncapsulatedType() reflect.Type { + impl, ok := t.typeImpl.(*capsuleType) + if !ok { + panic("not a capsule type") + } + return impl.GoType +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go new file mode 100644 index 00000000..3ff6855e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go @@ -0,0 +1,132 @@ +package cty + +import ( + "reflect" +) + +// CapsuleOps represents a set of overloaded operations for a capsule type. +// +// Each field is a reference to a function that can either be nil or can be +// set to an implementation of the corresponding operation. If an operation +// function is nil then it isn't supported for the given capsule type. +type CapsuleOps struct { + // GoString provides the GoString implementation for values of the + // corresponding type. Conventionally this should return a string + // representation of an expression that would produce an equivalent + // value. + GoString func(val interface{}) string + + // TypeGoString provides the GoString implementation for the corresponding + // capsule type itself. + TypeGoString func(goTy reflect.Type) string + + // Equals provides the implementation of the Equals operation. This is + // called only with known, non-null values of the corresponding type, + // but if the corresponding type is a compound type then it must be + // ready to detect and handle nested unknown or null values, usually + // by recursively calling Value.Equals on those nested values. + // + // The result value must always be of type cty.Bool, or the Equals + // operation will panic. + // + // If RawEquals is set without also setting Equals, the RawEquals + // implementation will be used as a fallback implementation. That fallback + // is appropriate only for leaf types that do not contain any nested + // cty.Value that would need to distinguish Equals vs. RawEquals for their + // own equality. + // + // If RawEquals is nil then Equals must also be nil, selecting the default + // pointer-identity comparison instead. + Equals func(a, b interface{}) Value + + // RawEquals provides the implementation of the RawEquals operation. + // This is called only with known, non-null values of the corresponding + // type, but if the corresponding type is a compound type then it must be + // ready to detect and handle nested unknown or null values, usually + // by recursively calling Value.RawEquals on those nested values. + // + // If RawEquals is nil, values of the corresponding type are compared by + // pointer identity of the encapsulated value. 
+ RawEquals func(a, b interface{}) bool + + // ConversionFrom can provide conversions from the corresponding type to + // some other type when values of the corresponding type are used with + // the "convert" package. (The main cty package does not use this operation.) + // + // This function itself returns a function, allowing it to switch its + // behavior depending on the given source type. Return nil to indicate + // that no such conversion is available. + ConversionFrom func(src Type) func(interface{}, Path) (Value, error) + + // ConversionTo can provide conversions to the corresponding type from + // some other type when values of the corresponding type are used with + // the "convert" package. (The main cty package does not use this operation.) + // + // This function itself returns a function, allowing it to switch its + // behavior depending on the given destination type. Return nil to indicate + // that no such conversion is available. + ConversionTo func(dst Type) func(Value, Path) (interface{}, error) + + // ExtensionData is an extension point for applications that wish to + // create their own extension features using capsule types. + // + // The key argument is any value that can be compared with Go's == + // operator, but should be of a named type in a package belonging to the + // application defining the key. An ExtensionData implementation must + // check to see if the given key is familar to it, and if so return a + // suitable value for the key. + // + // If the given key is unrecognized, the ExtensionData function must + // return a nil interface. (Importantly, not an interface containing a nil + // pointer of some other type.) + // The common implementation of ExtensionData is a single switch statement + // over "key" which has a default case returning nil. + // + // The meaning of any given key is entirely up to the application that + // defines it. Applications consuming ExtensionData from capsule types + // should do so defensively: if the result of ExtensionData is not valid, + // prefer to ignore it or gracefully produce an error rather than causing + // a panic. + ExtensionData func(key interface{}) interface{} +} + +// noCapsuleOps is a pointer to a CapsuleOps with no functions set, which +// is used as the default operations value when a type is created using +// the Capsule function. +var noCapsuleOps = &CapsuleOps{} + +func (ops *CapsuleOps) assertValid() { + if ops.RawEquals == nil && ops.Equals != nil { + panic("Equals cannot be set without RawEquals") + } +} + +// CapsuleOps returns a pointer to the CapsuleOps value for a capsule type, +// or panics if the receiver is not a capsule type. +// +// The caller must not modify the CapsuleOps. +func (ty Type) CapsuleOps() *CapsuleOps { + if !ty.IsCapsuleType() { + panic("not a capsule-typed value") + } + + return ty.typeImpl.(*capsuleType).Ops +} + +// CapsuleExtensionData is a convenience interface to the ExtensionData +// function that can be optionally implemented for a capsule type. It will +// check to see if the underlying type implements ExtensionData and call it +// if so. If not, it will return nil to indicate that the given key is not +// supported. +// +// See the documentation for CapsuleOps.ExtensionData for more information +// on the purpose of and usage of this mechanism. +// +// If CapsuleExtensionData is called on a non-capsule type then it will panic. 
+func (ty Type) CapsuleExtensionData(key interface{}) interface{} { + ops := ty.CapsuleOps() + if ops.ExtensionData == nil { + return nil + } + return ops.ExtensionData(key) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/collection.go b/vendor/github.com/hashicorp/go-cty/cty/collection.go new file mode 100644 index 00000000..ab3919b1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/collection.go @@ -0,0 +1,34 @@ +package cty + +import ( + "errors" +) + +type collectionTypeImpl interface { + ElementType() Type +} + +// IsCollectionType returns true if the given type supports the operations +// that are defined for all collection types. +func (t Type) IsCollectionType() bool { + _, ok := t.typeImpl.(collectionTypeImpl) + return ok +} + +// ElementType returns the element type of the receiver if it is a collection +// type, or panics if it is not. Use IsCollectionType first to test whether +// this method will succeed. +func (t Type) ElementType() Type { + if ct, ok := t.typeImpl.(collectionTypeImpl); ok { + return ct.ElementType() + } + panic(errors.New("not a collection type")) +} + +// ElementCallback is a callback type used for iterating over elements of +// collections and attributes of objects. +// +// The types of key and value depend on what type is being iterated over. +// Return true to stop iterating after the current element, or false to +// continue iterating. +type ElementCallback func(key Value, val Value) (stop bool) diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go new file mode 100644 index 00000000..6ad3bff4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go @@ -0,0 +1,165 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// compareTypes implements a preference order for unification. +// +// The result of this method is not useful for anything other than unification +// preferences, since it assumes that the caller will verify that any suggested +// conversion is actually possible and it is thus able to to make certain +// optimistic assumptions. +func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. + if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings. + if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. 
+ swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. + if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go new file mode 100644 index 00000000..9c59c8f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go @@ -0,0 +1,190 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + var ret conversion + ret = func(in cty.Value, path cty.Path) (cty.Value, error) { + if in.IsMarked() { + // We must unmark during the conversion and then re-apply the + // same marks to the result. 
+ in, inMarks := in.Unmark() + v, err := ret(in, path) + if v != cty.NilVal { + v = v.WithMarks(inMarks) + } + return v, err + } + + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } + + return ret +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). + return dynamicFixup(out) + + case in.IsPrimitiveType() && out.IsPrimitiveType(): + conv := primitiveConversionsSafe[in][out] + if conv != nil { + return conv + } + if unsafe { + return primitiveConversionsUnsafe[in][out] + } + return nil + + case out.IsObjectType() && in.IsObjectType(): + return conversionObjectToObject(in, out, unsafe) + + case out.IsTupleType() && in.IsTupleType(): + return conversionTupleToTuple(in, out, unsafe) + + case out.IsListType() && (in.IsListType() || in.IsSetType()): + inEty := in.ElementType() + outEty := out.ElementType() + if inEty.Equals(outEty) { + // This indicates that we're converting from list to set with + // the same element type, so we don't need an element converter. + return conversionCollectionToList(outEty, nil) + } + + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToList(outEty, convEty) + + case out.IsSetType() && (in.IsListType() || in.IsSetType()): + if in.IsListType() && !unsafe { + // Conversion from list to map is unsafe because it will lose + // information: the ordering will not be preserved, and any + // duplicate elements will be conflated. + return nil + } + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if inEty.Equals(outEty) { + // This indicates that we're converting from set to list with + // the same element type, so we don't need an element converter. 
+ return conversionCollectionToSet(outEty, nil) + } + + if convEty == nil { + return nil + } + return conversionCollectionToSet(outEty, convEty) + + case out.IsMapType() && in.IsMapType(): + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToMap(outEty, convEty) + + case out.IsListType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToList(in, outEty, unsafe) + + case out.IsSetType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToSet(in, outEty, unsafe) + + case out.IsMapType() && in.IsObjectType(): + outEty := out.ElementType() + return conversionObjectToMap(in, outEty, unsafe) + + case out.IsObjectType() && in.IsMapType(): + if !unsafe { + // Converting a map to an object is an "unsafe" conversion, + // because we don't know if all the map keys will correspond to + // object attributes. + return nil + } + return conversionMapToObject(in, out, unsafe) + + case in.IsCapsuleType() || out.IsCapsuleType(): + if !unsafe { + // Capsule types can only participate in "unsafe" conversions, + // because we don't know enough about their conversion behaviors + // to be sure that they will always be safe. + return nil + } + if in.Equals(out) { + // conversion to self is never allowed + return nil + } + if out.IsCapsuleType() { + if fn := out.CapsuleOps().ConversionTo; fn != nil { + return conversionToCapsule(in, out, fn) + } + } + if in.IsCapsuleType() { + if fn := in.CapsuleOps().ConversionFrom; fn != nil { + return conversionFromCapsule(in, out, fn) + } + } + // No conversion operation is available, then. + return nil + + default: + return nil + + } +} + +// retConversion wraps a conversion (internal type) so it can be returned +// as a Conversion (public type). 
+func retConversion(conv conversion) Conversion { + if conv == nil { + return nil + } + + return func(in cty.Value) (cty.Value, error) { + return conv(in, cty.Path(nil)) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go new file mode 100644 index 00000000..6a6006af --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go @@ -0,0 +1,31 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +func conversionToCapsule(inTy, outTy cty.Type, fn func(inTy cty.Type) func(cty.Value, cty.Path) (interface{}, error)) conversion { + rawConv := fn(inTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + rawV, err := rawConv(in, path) + if err != nil { + return cty.NilVal, err + } + return cty.CapsuleVal(outTy, rawV), nil + } +} + +func conversionFromCapsule(inTy, outTy cty.Type, fn func(outTy cty.Type) func(interface{}, cty.Path) (cty.Value, error)) conversion { + rawConv := fn(outTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + return rawConv(in.EncapsulatedValue(), path) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go new file mode 100644 index 00000000..575973d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go @@ -0,0 +1,488 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionCollectionToList returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a list. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a list. (For example, +// if we're converting from a set into a list of the same element type.) +func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) 
+func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty set + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, elemPath.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty map + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.MapValEmpty(ety), nil + } + + if ety.IsCollectionType() || ety.IsObjectType() { + var err error + if elems, err = conversionUnifyCollectionElements(elems, path, false); err != nil { + return cty.NilVal, err + } + } + + if err := conversionCheckMapElementTypes(elems, path); err != nil { + return cty.NilVal, err + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.SetValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. 
+ listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + elemPath := append(path.Copy(), nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.SetVal(elems), nil + } +} + +// conversionTupleToList returns a conversion that will take a value of the +// given tuple type and return a list of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.ListValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + elemPath := append(path.Copy(), nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.ListVal(elems), nil + } +} + +// conversionObjectToMap returns a conversion that will take a value of the +// given object type and return a map of the given element type. +// +// Will panic if the given objectType isn't actually an object type. 
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion { + objectAtys := objectType.AttributeTypes() + + if len(objectAtys) == 0 { + // Empty object short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.MapValEmpty(mapEty), nil + } + } + + if mapEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + objectAtysList := make([]cty.Type, 0, len(objectAtys)) + for _, aty := range objectAtys { + objectAtysList = append(objectAtysList, aty) + } + mapEty, _ = unify(objectAtysList, unsafe) + if mapEty == cty.NilType { + return nil + } + } + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(objectAty, mapEty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems[name.AsString()] = val + } + + if mapEty.IsCollectionType() || mapEty.IsObjectType() { + var err error + if elems, err = conversionUnifyCollectionElements(elems, path, unsafe); err != nil { + return cty.NilVal, err + } + } + + if err := conversionCheckMapElementTypes(elems, path); err != nil { + return cty.NilVal, err + } + + return cty.MapVal(elems), nil + } +} + +// conversionMapToObject returns a conversion that will take a value of the +// given map type and return an object of the given type. The object attribute +// types must all be compatible with the map element type. +// +// Will panic if the given mapType and objType are not maps and objects +// respectively. +func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conversion { + objectAtys := objType.AttributeTypes() + mapEty := mapType.ElementType() + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(mapEty, objectAty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. 
+ return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + + // if there is no corresponding attribute, we skip this key + if _, ok := objectAtys[name.AsString()]; !ok { + continue + } + + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + + elems[name.AsString()] = val + } + + return cty.ObjectVal(elems), nil + } +} + +func conversionUnifyCollectionElements(elems map[string]cty.Value, path cty.Path, unsafe bool) (map[string]cty.Value, error) { + elemTypes := make([]cty.Type, 0, len(elems)) + for _, elem := range elems { + elemTypes = append(elemTypes, elem.Type()) + } + unifiedType, _ := unify(elemTypes, unsafe) + if unifiedType == cty.NilType { + } + + unifiedElems := make(map[string]cty.Value) + elemPath := append(path.Copy(), nil) + + for name, elem := range elems { + if elem.Type().Equals(unifiedType) { + unifiedElems[name] = elem + continue + } + conv := getConversion(elem.Type(), unifiedType, unsafe) + if conv == nil { + } + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.StringVal(name), + } + val, err := conv(elem, elemPath) + if err != nil { + return nil, err + } + unifiedElems[name] = val + } + + return unifiedElems, nil +} + +func conversionCheckMapElementTypes(elems map[string]cty.Value, path cty.Path) error { + elementType := cty.NilType + elemPath := append(path.Copy(), nil) + + for name, elem := range elems { + if elementType == cty.NilType { + elementType = elem.Type() + continue + } + if !elementType.Equals(elem.Type()) { + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.StringVal(name), + } + return elemPath.NewErrorf("%s is required", elementType.FriendlyName()) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go new file mode 100644 index 00000000..5f571da1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go @@ -0,0 +1,33 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// dynamicFixup deals with just-in-time conversions of values that were +// input-typed as cty.DynamicPseudoType during analysis, ensuring that +// we end up with the desired output type once the value is known, or +// failing with an error if that is not possible. +// +// This is in the spirit of the cty philosophy of optimistically assuming that +// DynamicPseudoType values will become the intended value eventually, and +// dealing with any inconsistencies during final evaluation. +func dynamicFixup(wantType cty.Type) conversion { + return func(in cty.Value, path cty.Path) (cty.Value, error) { + ret, err := Convert(in, wantType) + if err != nil { + // Re-wrap this error so that the returned path is relative + // to the caller's original value, rather than relative to our + // conversion value here. 
+ return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. +func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go new file mode 100644 index 00000000..93743ca8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. + attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. 
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 00000000..a5534441 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,57 @@ +package convert + +import ( + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + v, err := cty.ParseNumberVal(val.AsString()) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return v, nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + switch strings.ToLower(val.AsString()) { + case "true": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"true\"") + case "false": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"false\"") + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + } + }, + }, +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 00000000..d89ec380 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. 
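A small sketch of the tuple-to-tuple rule described above, again going through the public `convert` API rather than the unexported helpers; the tuple contents are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	// A two-element tuple of (string, number).
	in := cty.TupleVal([]cty.Value{
		cty.StringVal("port"),
		cty.NumberIntVal(8080),
	})

	// Same length and element-wise convertible, so a tuple-to-tuple
	// conversion exists; the number element is converted to a string.
	out, err := convert.Convert(in, cty.Tuple([]cty.Type{cty.String, cty.String}))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)

	// Tuples of different lengths have no conversion at all.
	if convert.GetConversionUnsafe(cty.Tuple([]cty.Type{cty.String}), cty.EmptyTuple) == nil {
		fmt.Println("no conversion between tuples of different lengths")
	}
}
```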
+func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion { + inEtys := in.TupleElementTypes() + outEtys := out.TupleElementTypes() + + if len(inEtys) != len(outEtys) { + return nil // no conversion is possible + } + + elemConvs := make([]conversion, len(inEtys)) + + for i, outEty := range outEtys { + inEty := inEtys[i] + + if inEty.Equals(outEty) { + // No conversion needed, so we can leave this one nil. + continue + } + + elemConvs[i] = getConversion(inEty, outEty, unsafe) + if elemConvs[i] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the element + // conversions given in elemConvs. + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elemVals := make([]cty.Value, len(elemConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, val := it.Element() + var err error + + *pathStep = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elemVals[i] = val + } + + return cty.TupleVal(elemVals), nil + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go new file mode 100644 index 00000000..2037299b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. +// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. +package convert diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go new file mode 100644 index 00000000..72f307f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go @@ -0,0 +1,220 @@ +package convert + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/go-cty/cty" +) + +// MismatchMessage is a helper to return an English-language description of +// the differences between got and want, phrased as a reason why got does +// not conform to want. +// +// This function does not itself attempt conversion, and so it should generally +// be used only after a conversion has failed, to report the conversion failure +// to an English-speaking user. The result will be confusing got is actually +// conforming to or convertable to want. +// +// The shorthand helper function Convert uses this function internally to +// produce its error messages, so callers of that function do not need to +// also use MismatchMessage. 
+// +// This function is similar to Type.TestConformance, but it is tailored to +// describing conversion failures and so the messages it generates relate +// specifically to the conversion rules implemented in this package. +func MismatchMessage(got, want cty.Type) string { + switch { + + case got.IsObjectType() && want.IsObjectType(): + // If both types are object types then we may be able to say something + // about their respective attributes. + return mismatchMessageObjects(got, want) + + case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to list failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all list elements must have the same type" + + case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to set failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all set elements must have the same type" + + case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from object to map failed then it's because we couldn't + // find a common type to convert all of the object attributes to. + return "all map elements must have the same type" + + case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType(): + return mismatchMessageCollectionsFromStructural(got, want) + + case got.IsCollectionType() && want.IsCollectionType(): + return mismatchMessageCollectionsFromCollections(got, want) + + default: + // If we have nothing better to say, we'll just state what was required. + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageObjects(got, want cty.Type) string { + // Per our conversion rules, "got" is allowed to be a superset of "want", + // and so we'll produce error messages here under that assumption. + gotAtys := got.AttributeTypes() + wantAtys := want.AttributeTypes() + + // If we find missing attributes then we'll report those in preference, + // but if not then we will report a maximum of one non-conforming + // attribute, just to keep our messages relatively terse. + // We'll also prefer to report a recursive type error from an _unsafe_ + // conversion over a safe one, because these are subjectively more + // "serious". + var missingAttrs []string + var unsafeMismatchAttr string + var safeMismatchAttr string + + for name, wantAty := range wantAtys { + gotAty, exists := gotAtys[name] + if !exists { + missingAttrs = append(missingAttrs, name) + continue + } + + // We'll now try to convert these attributes in isolation and + // see if we have a nested conversion error to report. + // We'll try an unsafe conversion first, and then fall back on + // safe if unsafe is possible. + + // If we already have an unsafe mismatch attr error then we won't bother + // hunting for another one. + if unsafeMismatchAttr != "" { + continue + } + if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil { + unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + + // If we already have a safe mismatch attr error then we won't bother + // hunting for another one. 
+ if safeMismatchAttr != "" { + continue + } + if conv := GetConversion(gotAty, wantAty); conv == nil { + safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + } + + // We should now have collected at least one problem. If we have more than + // one then we'll use our preference order to decide what is most important + // to report. + switch { + + case len(missingAttrs) != 0: + sort.Strings(missingAttrs) + switch len(missingAttrs) { + case 1: + return fmt.Sprintf("attribute %q is required", missingAttrs[0]) + case 2: + return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1]) + default: + sort.Strings(missingAttrs) + var buf bytes.Buffer + for _, name := range missingAttrs[:len(missingAttrs)-1] { + fmt.Fprintf(&buf, "%q, ", name) + } + fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1]) + return fmt.Sprintf("attributes %s are required", buf.Bytes()) + } + + case unsafeMismatchAttr != "": + return unsafeMismatchAttr + + case safeMismatchAttr != "": + return safeMismatchAttr + + default: + // We should never get here, but if we do then we'll return + // just a generic message. + return "incorrect object attributes" + } +} + +func mismatchMessageCollectionsFromStructural(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. + switch { + case want.IsListType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsObjectType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll move on to checking + // individual elements. + wantEty := want.ElementType() + switch { + case got.IsTupleType(): + for i, gotEty := range got.TupleElementTypes() { + if gotEty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotEty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + case got.IsObjectType(): + for name, gotAty := range got.AttributeTypes() { + if gotAty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotAty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + default: + // Should not be possible to get here since we only call this function + // with got as structural types, but... + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageCollectionsFromCollections(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. 
+ switch { + case want.IsListType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsMapType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll check the element types. + gotEty := got.ElementType() + wantEty := want.ElementType() + noun := "element type" + switch { + case want.IsListType(): + noun = "list element type" + case want.IsSetType(): + noun = "set element type" + case want.IsMapType(): + noun = "map element type" + } + return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty)) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/public.go b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go new file mode 100644 index 00000000..3b50a692 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "errors" + + "github.com/hashicorp/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. +// +// The source type for a conversion is always the source type given to +// the function that returned the Conversion, but there is no way to recover +// that from a Conversion value itself. If a Conversion is given a value +// that is not of its expected type (with the exception of DynamicPseudoType, +// which is always supported) then the function may panic or produce undefined +// results. +type Conversion func(in cty.Value) (out cty.Value, err error) + +// GetConversion returns a Conversion between the given in and out Types if +// a safe one is available, or returns nil otherwise. +func GetConversion(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, false)) +} + +// GetConversionUnsafe returns a Conversion between the given in and out Types +// if either a safe or unsafe one is available, or returns nil otherwise. +func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, true)) +} + +// Convert returns the result of converting the given value to the given type +// if an safe or unsafe conversion is available, or returns an error if such a +// conversion is impossible. +// +// This is a convenience wrapper around calling GetConversionUnsafe and then +// immediately passing the given value to the resulting function. +func Convert(in cty.Value, want cty.Type) (cty.Value, error) { + if in.Type().Equals(want) { + return in, nil + } + + conv := GetConversionUnsafe(in.Type(), want) + if conv == nil { + return cty.NilVal, errors.New(MismatchMessage(in.Type(), want)) + } + return conv(in) +} + +// Unify attempts to find the most general type that can be converted from +// all of the given types. If this is possible, that type is returned along +// with a slice of necessary conversions for some of the given types. +// +// If no common supertype can be found, this function returns cty.NilType and +// a nil slice. 
+// +// If a common supertype *can* be found, the returned slice will always be +// non-nil and will contain a non-nil conversion for each given type that +// needs to be converted, with indices corresponding to the input slice. +// Any given type that does *not* need conversion (because it is already of +// the appropriate type) will have a nil Conversion. +// +// cty.DynamicPseudoType is, as usual, a special case. If the given type list +// contains a mixture of dynamic and non-dynamic types, the dynamic types are +// disregarded for type selection and a conversion is returned for them that +// will attempt a late conversion of the given value to the target type, +// failing with a conversion error if the eventual concrete type is not +// compatible. If *all* given types are DynamicPseudoType, or in the +// degenerate case of an empty slice of types, the returned type is itself +// cty.DynamicPseudoType and no conversions are attempted. +func Unify(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, false) +} + +// UnifyUnsafe is the same as Unify except that it may return unsafe +// conversions in situations where a safe conversion isn't also available. +func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, true) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go new file mode 100644 index 00000000..8a9c3276 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go @@ -0,0 +1,69 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// sortTypes produces an ordering of the given types that serves as a +// preference order for the result of unification of the given types. +// The return value is a slice of indices into the given slice, and will +// thus always be the same length as the given slice. +// +// The goal is that the most general of the given types will appear first +// in the ordering. If there are uncomparable pairs of types in the list +// then they will appear in an undefined order, and the unification pass +// will presumably then fail. +func sortTypes(tys []cty.Type) []int { + l := len(tys) + + // First we build a graph whose edges represent "more general than", + // which we will then do a topological sort of. + edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. 
+ for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go new file mode 100644 index 00000000..b2a3bbe5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go @@ -0,0 +1,357 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + mapCt := 0 + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsMapType(): + mapCt++ + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case mapCt > 0 && (mapCt+dynamicCt) == len(types): + return unifyMapTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. 
+ conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // If we fall out here, no unification is possible + return cty.NilType, nil +} + +func unifyMapTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + elemTypes := make([]cty.Type, 0, len(types)) + for _, ty := range types { + elemTypes = append(elemTypes, ty.ElementType()) + } + retElemType, _ := unify(elemTypes, unsafe) + if retElemType == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.Map(retElemType) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return cty.NilType, nil + } + } + + return retTy, conversions +} + +func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given object types have the same set of attribute names + // and the corresponding types are all unifyable, then we construct that + // type. + // - If the given object types have different attribute names or their + // corresponding types are not unifyable, we'll instead try to unify + // all of the attribute types together to produce a map type. + // + // Our unification behavior is intentionally stricter than our conversion + // behavior for subset object types because user intent is different with + // unification use-cases: it makes sense to allow {"foo":true} to convert + // to emptyobjectval, but unifying an object with an attribute with the + // empty object type should be an error because unifying to the empty + // object type would be suprising and useless. + + firstAttrs := types[0].AttributeTypes() + for _, ty := range types[1:] { + thisAttrs := ty.AttributeTypes() + if len(thisAttrs) != len(firstAttrs) { + // If number of attributes is different then there can be no + // object type in common. + return unifyObjectTypesToMap(types, unsafe) + } + for name := range thisAttrs { + if _, ok := firstAttrs[name]; !ok { + // If attribute names don't exactly match then there can be + // no object type in common. 
+ return unifyObjectTypesToMap(types, unsafe) + } + } + } + + // If we get here then we've proven that all of the given object types + // have exactly the same set of attribute names, though the types may + // differ. + retAtys := make(map[string]cty.Type) + atysAcross := make([]cty.Type, len(types)) + for name := range firstAttrs { + for i, ty := range types { + atysAcross[i] = ty.AttributeType(name) + } + retAtys[name], _ = unify(atysAcross, unsafe) + if retAtys[name] == cty.NilType { + // Cannot unify this attribute alone, which means that unification + // of everything down to a map type can't be possible either. + return cty.NilType, nil + } + } + retTy := cty.Object(retAtys) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyObjectTypesToMap(types, unsafe) + } + } + + return retTy, conversions +} + +func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + // This is our fallback case for unifyObjectTypes, where we see if we can + // construct a map type that can accept all of the attribute types. + + var atys []cty.Type + for _, ty := range types { + for _, aty := range ty.AttributeTypes() { + atys = append(atys, aty) + } + } + + ety, _ := unify(atys, unsafe) + if ety == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.Map(ety) + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + return cty.NilType, nil + } + } + return retTy, conversions +} + +func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given tuple types have the same sequence of element types + // and the corresponding types are all unifyable, then we construct that + // type. + // - If the given tuple types have different element types or their + // corresponding types are not unifyable, we'll instead try to unify + // all of the elements types together to produce a list type. + + firstEtys := types[0].TupleElementTypes() + for _, ty := range types[1:] { + thisEtys := ty.TupleElementTypes() + if len(thisEtys) != len(firstEtys) { + // If number of elements is different then there can be no + // tuple type in common. + return unifyTupleTypesToList(types, unsafe) + } + } + + // If we get here then we've proven that all of the given tuple types + // have the same number of elements, though the types may differ. 
+ retEtys := make([]cty.Type, len(firstEtys)) + atysAcross := make([]cty.Type, len(types)) + for idx := range firstEtys { + for tyI, ty := range types { + atysAcross[tyI] = ty.TupleElementTypes()[idx] + } + retEtys[idx], _ = unify(atysAcross, unsafe) + if retEtys[idx] == cty.NilType { + // Cannot unify this element alone, which means that unification + // of everything down to a map type can't be possible either. + return cty.NilType, nil + } + } + retTy := cty.Tuple(retEtys) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyTupleTypesToList(types, unsafe) + } + } + + return retTy, conversions +} + +func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + // This is our fallback case for unifyTupleTypes, where we see if we can + // construct a list type that can accept all of the element types. + + var etys []cty.Type + for _, ty := range types { + for _, ety := range ty.TupleElementTypes() { + etys = append(etys, ety) + } + } + + ety, _ := unify(etys, unsafe) + if ety == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.List(ety) + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyObjectTypesToMap(types, unsafe) + } + } + return retTy, conversions +} + +func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) { + conversions := make([]Conversion, len(types)) + for i := range conversions { + conversions[i] = func(cty.Value) (cty.Value, error) { + return cty.DynamicVal, nil + } + } + return cty.DynamicPseudoType, conversions +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/doc.go new file mode 100644 index 00000000..d31f0547 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. +// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. 
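A minimal sketch of the value/type model the package doc above describes, including the lexicographically-ordered iteration provided by element_iterator.go below; the attribute names are made up:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// Values carry their type alongside the underlying data.
	obj := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("alks"),
		"enabled": cty.True,
	})

	fmt.Println(obj.Type().IsObjectType())      // true
	fmt.Println(obj.GetAttr("name").AsString()) // alks

	// Object values can be walked with an ElementIterator, which visits
	// attributes in lexicographical key order.
	for it := obj.ElementIterator(); it.Next(); {
		k, v := it.Element()
		fmt.Printf("%s: %s\n", k.AsString(), v.Type().FriendlyName())
	}
}
```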
+package cty diff --git a/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go new file mode 100644 index 00000000..31567e76 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go @@ -0,0 +1,194 @@ +package cty + +import ( + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.IsMarked(): + return false + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + val.assertUnmarked() + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
+ atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/error.go b/vendor/github.com/hashicorp/go-cty/cty/error.go new file mode 100644 index 00000000..dd139f72 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/error.go @@ -0,0 +1,55 @@ +package cty + +import ( + "fmt" +) + +// PathError is a specialization of error that represents where in a +// potentially-deep data structure an error occured, using a Path. +type PathError struct { + error + Path Path +} + +func errorf(path Path, f string, args ...interface{}) error { + // We need to copy the Path because often our caller builds it by + // continually mutating the same underlying buffer. + sPath := make(Path, len(path)) + copy(sPath, path) + return PathError{ + error: fmt.Errorf(f, args...), + Path: sPath, + } +} + +// NewErrorf creates a new PathError for the current path by passing the +// given format and arguments to fmt.Errorf and then wrapping the result +// similarly to NewError. +func (p Path) NewErrorf(f string, args ...interface{}) error { + return errorf(p, f, args...) +} + +// NewError creates a new PathError for the current path, wrapping the given +// error. +func (p Path) NewError(err error) error { + // if we're being asked to wrap an existing PathError then our new + // PathError will be the concatenation of the two paths, ensuring + // that we still get a single flat PathError that's thus easier for + // callers to deal with. 
+ perr, wrappingPath := err.(PathError) + pathLen := len(p) + if wrappingPath { + pathLen = pathLen + len(perr.Path) + } + + sPath := make(Path, pathLen) + copy(sPath, p) + if wrappingPath { + copy(sPath[len(p):], perr.Path) + } + + return PathError{ + error: err, + Path: sPath, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gob.go b/vendor/github.com/hashicorp/go-cty/cty/gob.go new file mode 100644 index 00000000..80929aa5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gob.go @@ -0,0 +1,204 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "math/big" + + "github.com/hashicorp/go-cty/cty/set" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent values of capsule types in gob, +// because the types themselves cannot be represented. +func (val Value) GobEncode() ([]byte, error) { + if val.IsMarked() { + return nil, errors.New("value is marked") + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gv := gobValue{ + Version: 0, + Ty: val.ty, + V: val.v, + } + + err := enc.Encode(gv) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Value: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementation of the gob.GobDecoder interface, which +// inverts the operation performed by GobEncode. See the documentation of +// GobEncode for considerations when using cty.Value instances with gob. +func (val *Value) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gv gobValue + err := dec.Decode(&gv) + if err != nil { + return fmt.Errorf("error decoding cty.Value: %s", err) + } + if gv.Version != 0 { + return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) + } + + // Because big.Float.GobEncode is implemented with a pointer reciever, + // gob encoding of an interface{} containing a *big.Float value does not + // round-trip correctly, emerging instead as a non-pointer big.Float. + // The rest of cty expects all number values to be represented by + // *big.Float, so we'll fix that up here. + gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty) + + val.ty = gv.Ty + val.v = gv.V + + return nil +} + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Types to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent capsule types in gob. +func (t Type) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gt := gobType{ + Version: 0, + Impl: t.typeImpl, + } + + err := enc.Encode(gt) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Type: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementatino of the gob.GobDecoder interface, which +// reverses the encoding performed by GobEncode to allow types to be recovered +// from gob buffers. 
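A minimal round-trip sketch of the gob support documented above, assuming a simple string value (capsule types, as noted, cannot be encoded); this is illustrative, not part of the vendored source:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	var buf bytes.Buffer

	// Encode a value of a non-capsule type.
	in := cty.StringVal("hello")
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}

	// Decode it back and confirm the round trip preserved it.
	var out cty.Value
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.RawEquals(in)) // true
}
```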
+func (t *Type) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gt gobType + err := dec.Decode(>) + if err != nil { + return fmt.Errorf("error decoding cty.Type: %s", err) + } + if gt.Version != 0 { + return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version) + } + + t.typeImpl = gt.Impl + + return nil +} + +// Capsule types cannot currently be gob-encoded, because they rely on pointer +// equality and we have no way to recover the original pointer on decode. +func (t *capsuleType) GobEncode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +func (t *capsuleType) GobDecode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +type gobValue struct { + Version int + Ty Type + V interface{} +} + +type gobType struct { + Version int + Impl typeImpl +} + +type gobCapsuleTypeImpl struct { +} + +// goDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number +// values through gob: the big.Float.GobEncode method is implemented on a +// pointer receiver, and so it loses the "pointer-ness" of the value on +// encode, causing the values to emerge the other end as big.Float rather than +// *big.Float as we expect elsewhere in cty. +// +// The implementation of gobDecodeFixNumberPtr mutates the given raw value +// during its work, and may either return the same value mutated or a new +// value. Callers must no longer use whatever value they pass as "raw" after +// this function is called. +func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} { + // Unfortunately we need to work recursively here because number values + // might be embedded in structural or collection type values. + + switch { + case ty.Equals(Number): + if bf, ok := raw.(big.Float); ok { + return &bf // wrap in pointer + } + case ty.IsMapType() && ty.ElementType().Equals(Number): + if m, ok := raw.(map[string]interface{}); ok { + for k, v := range m { + m[k] = gobDecodeFixNumberPtr(v, ty.ElementType()) + } + } + case ty.IsListType() && ty.ElementType().Equals(Number): + if s, ok := raw.([]interface{}); ok { + for i, v := range s { + s[i] = gobDecodeFixNumberPtr(v, ty.ElementType()) + } + } + case ty.IsSetType() && ty.ElementType().Equals(Number): + if s, ok := raw.(set.Set); ok { + newS := set.NewSet(s.Rules()) + for it := s.Iterator(); it.Next(); { + newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType()) + newS.Add(newV) + } + return newS + } + case ty.IsObjectType(): + if m, ok := raw.(map[string]interface{}); ok { + for k, v := range m { + aty := ty.AttributeType(k) + m[k] = gobDecodeFixNumberPtr(v, aty) + } + } + case ty.IsTupleType(): + if s, ok := raw.([]interface{}); ok { + for i, v := range s { + ety := ty.TupleElementType(i) + s[i] = gobDecodeFixNumberPtr(v, ety) + } + } + } + + return raw +} + +// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr +// that works with already-constructed values. This is primarily for testing, +// to fix up intentionally-invalid number values for the parts of the test +// code that need them to be valid, such as calling GoString on them. 
+func gobDecodeFixNumberPtrVal(v Value) Value { + raw := gobDecodeFixNumberPtr(v.v, v.ty) + return Value{ + v: raw, + ty: v.ty, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go new file mode 100644 index 00000000..a5177d22 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go @@ -0,0 +1,7 @@ +// Package gocty deals with converting between cty Values and native go +// values. +// +// It operates under a similar principle to the encoding/json and +// encoding/xml packages in the standard library, using reflection to +// populate native Go data structures from cty values and vice-versa. +package gocty diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go new file mode 100644 index 00000000..0677a079 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go @@ -0,0 +1,43 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/set" +) + +var valueType = reflect.TypeOf(cty.Value{}) +var typeType = reflect.TypeOf(cty.Type{}) + +var setType = reflect.TypeOf(set.Set{}) + +var bigFloatType = reflect.TypeOf(big.Float{}) +var bigIntType = reflect.TypeOf(big.Int{}) + +var emptyInterfaceType = reflect.TypeOf(interface{}(nil)) + +var stringType = reflect.TypeOf("") + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. +func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go new file mode 100644 index 00000000..fc35c169 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go @@ -0,0 +1,548 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. 
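A short sketch of how the target-type hint described above resolves ambiguity: the same Go slice can become either a list or a set depending on the type passed to ToCtyValue. The slice contents are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/gocty"
)

func main() {
	vals := []string{"a", "b", "c"}

	asList, err := gocty.ToCtyValue(vals, cty.List(cty.String))
	if err != nil {
		panic(err)
	}
	asSet, err := gocty.ToCtyValue(vals, cty.Set(cty.String))
	if err != nil {
		panic(err)
	}

	fmt.Println(asList.Type().FriendlyName()) // list of string
	fmt.Println(asSet.Type().FriendlyName())  // set of string
}
```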
+func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. + path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { 
+ return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. 
+ path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go new file mode 100644 index 00000000..404faba1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go @@ -0,0 +1,686 @@ +package gocty + +import ( + "math" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + target.SetBool(val.True()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.SetInt(iv) + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.SetUint(iv) + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32, reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.SetFloat(fv) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.ConvertibleTo(target.Type()): + // Easy! 
+ target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type())) + return nil + + case bigIntType.ConvertibleTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type())) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.String: + target.SetString(val.AsString()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not 
allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. 
+ return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
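For illustration, decoding into an incompatible Go target surfaces one of the messages listed above. A minimal sketch, assuming the vendored import path and a throwaway `main` package:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/gocty"
)

func main() {
	// Decoding a cty string into an int target fails, and the error is
	// phrased in cty terms ("number value is required") rather than
	// exposing the Go kind of the target.
	var n int
	err := gocty.FromCtyValue(cty.StringVal("hello"), &n)
	fmt.Println(err)
}
```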
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go new file mode 100644 index 00000000..b4134253 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
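Taken together, `ImpliedType`, `ToCtyValue`, and `FromCtyValue` let a tagged Go struct round-trip through a `cty.Value`. A minimal sketch, assuming the vendored import path; the `Endpoint` struct and its field tags are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty/gocty"
)

// Endpoint is a hypothetical struct; the cty field tags drive ImpliedType.
type Endpoint struct {
	Host string `cty:"host"`
	Port int    `cty:"port"`
}

func main() {
	in := Endpoint{Host: "example.com", Port: 443}

	// Derive an object type from the struct's cty tags...
	ty, err := gocty.ImpliedType(in)
	if err != nil {
		panic(err)
	}

	// ...convert the Go value into a cty.Value of that type...
	v, err := gocty.ToCtyValue(in, ty)
	if err != nil {
		panic(err)
	}

	// ...and decode it back into a fresh struct.
	var out Endpoint
	if err := gocty.FromCtyValue(v, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
}
```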
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/helper.go b/vendor/github.com/hashicorp/go-cty/cty/helper.go new file mode 100644 index 00000000..1b88e9fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
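`typeCheck` and `forceShortCircuitType` are unexported, but the short-circuiting they describe is observable through the value operations built on them (for example `Add` on `cty.Value`). A rough sketch, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// An unknown operand short-circuits to an unknown result of the
	// operation's return type instead of raising a type error.
	sum := cty.UnknownVal(cty.Number).Add(cty.NumberIntVal(1))
	fmt.Println(sum.IsKnown(), sum.Type().Equals(cty.Number)) // false true

	// A wholly dynamic operand is "upgraded" to a typed unknown, so
	// later operations can still be type-checked.
	dyn := cty.DynamicVal.Add(cty.NumberIntVal(1))
	fmt.Println(dyn.IsKnown(), dyn.Type().Equals(cty.Number)) // false true
}
```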
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) { + hasDynamic := false + hasUnknown := false + + for i, val := range values { + if val.ty == DynamicPseudoType { + hasDynamic = true + continue + } + + if !val.Type().Equals(required) { + return nil, fmt.Errorf( + "type mismatch: want %s but value %d is %s", + required.FriendlyName(), + i, val.ty.FriendlyName(), + ) + } + + if val.v == unknown { + hasUnknown = true + } + } + + if hasDynamic { + return &DynamicVal, nil + } + + if hasUnknown { + ret := UnknownVal(ret) + return &ret, nil + } + + return nil, nil +} + +// mustTypeCheck is a wrapper around typeCheck that immediately panics if +// any error is returned. +func mustTypeCheck(required Type, ret Type, values ...Value) *Value { + shortCircuit, err := typeCheck(required, ret, values...) + if err != nil { + panic(err) + } + return shortCircuit +} + +// shortCircuitForceType takes the return value from mustTypeCheck and +// replaces it with an unknown of the given type if the original value was +// DynamicVal. +// +// This is useful for operations that are specified to always return a +// particular type, since then a dynamic result can safely be "upgrade" to +// a strongly-typed unknown, which then allows subsequent operations to +// be actually type-checked. +// +// It is safe to use this only if the operation in question is defined as +// returning either a value of the given type or panicking, since we know +// then that subsequent operations won't run if the operation panics. +// +// If the given short-circuit value is *not* DynamicVal then it must be +// of the given type, or this function will panic. +func forceShortCircuitType(shortCircuit *Value, ty Type) *Value { + if shortCircuit == nil { + return nil + } + + if shortCircuit.ty == DynamicPseudoType { + ret := UnknownVal(ty) + return &ret + } + + if !shortCircuit.ty.Equals(ty) { + panic("forceShortCircuitType got value of wrong type") + } + + return shortCircuit +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json.go b/vendor/github.com/hashicorp/go-cty/cty/json.go new file mode 100644 index 00000000..c421a62e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json.go @@ -0,0 +1,176 @@ +package cty + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// MarshalJSON is an implementation of json.Marshaler that allows Type +// instances to be serialized as JSON. +// +// All standard types can be serialized, but capsule types cannot since there +// is no way to automatically recover the original pointer and capsule types +// compare by equality. 
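As a brief illustration of this serialization, a composite type round-trips through the standard `encoding/json` package as a nested array description. The sketch below assumes the vendored import path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// cty.Type implements json.Marshaler, so the standard library can
	// serialize it directly.
	buf, err := json.Marshal(cty.List(cty.Object(map[string]cty.Type{
		"name": cty.String,
	})))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ["list",["object",{"name":"string"}]]

	// UnmarshalJSON recovers an equivalent Type from the same description.
	var ty cty.Type
	if err := json.Unmarshal(buf, &ty); err != nil {
		panic(err)
	}
	fmt.Println(ty.FriendlyName()) // list of object
}
```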
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/doc.go 
b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go new file mode 100644 index 00000000..8916513d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go new file mode 100644 index 00000000..728ab010 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go @@ -0,0 +1,193 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/hashicorp/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + if val.IsMarked() { + return path.NewErrorf("value has marks, so it cannot be seralized") + } + + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. 
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
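The effect of this wrapping is easiest to see through the package's exported `Marshal` and `Unmarshal` functions (defined in `value.go` later in this diff). A minimal sketch, assuming the vendored import path; the attribute name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	ctyjson "github.com/hashicorp/go-cty/cty/json"
)

func main() {
	// Marshalling against cty.DynamicPseudoType stores the concrete type
	// alongside the value so it can be recovered on decode.
	src := cty.ObjectVal(map[string]cty.Value{
		"enabled": cty.True,
	})
	buf, err := ctyjson.Marshal(src, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"value":{"enabled":true},"type":["object",{"enabled":"bool"}]}

	got, err := ctyjson.Unmarshal(buf, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(src)) // true
}
```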
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error { + typeJSON, err := MarshalType(val.Type()) + if err != nil { + return path.NewErrorf("failed to serialize type: %s", err) + } + b.WriteString(`{"value":`) + marshal(val, val.Type(), path, b) + b.WriteString(`,"type":`) + b.Write(typeJSON) + b.WriteRune('}') + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/simple.go b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go new file mode 100644 index 00000000..aaba8c3b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// SimpleJSONValue is a wrapper around cty.Value that adds implementations of +// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic +// encoding and decoding of values. +// +// The couplet Marshal and Unmarshal both take extra type information to +// inform the encoding and decoding process so that all of the cty types +// can be represented even though JSON's type system is a subset. +// +// SimpleJSONValue instead takes the approach of discarding the value's type +// information and then deriving a new type from the stored structure when +// decoding. This results in the same data being returned but not necessarily +// with exactly the same type. +// +// For information on how types are inferred when decoding, see the +// documentation of the function ImpliedType. +type SimpleJSONValue struct { + cty.Value +} + +// MarshalJSON is an implementation of json.Marshaler. See the documentation +// of SimpleJSONValue for more information. +func (v SimpleJSONValue) MarshalJSON() ([]byte, error) { + return Marshal(v.Value, v.Type()) +} + +// UnmarshalJSON is an implementation of json.Unmarshaler. See the +// documentation of SimpleJSONValue for more information. +func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error { + t, err := ImpliedType(buf) + if err != nil { + return err + } + v.Value, err = Unmarshal(buf, t) + return err +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type.go b/vendor/github.com/hashicorp/go-cty/cty/json/type.go new file mode 100644 index 00000000..59d7f2e1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/type.go @@ -0,0 +1,23 @@ +package json + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// MarshalType returns a JSON serialization of the given type. +// +// This is just a thin wrapper around t.MarshalJSON, for symmetry with +// UnmarshalType. +func MarshalType(t cty.Type) ([]byte, error) { + return t.MarshalJSON() +} + +// UnmarshalType decodes a JSON serialization of the given type as produced +// by either Type.MarshalJSON or MarshalType. +// +// This is a convenience wrapper around Type.UnmarshalJSON. +func UnmarshalType(buf []byte) (cty.Type, error) { + var t cty.Type + err := t.UnmarshalJSON(buf) + return t, err +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go new file mode 100644 index 00000000..8adf22bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go @@ -0,0 +1,170 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// JSON-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary JSON without explicit cty Type +// information. 
+// +// The rules are as follows: +// +// JSON strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// JSON objects map to cty object types, with the attributes defined by the +// object keys and the types of their values. +// +// JSON arrays map to cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any JSON syntax errors will be returned as an error, and the type will +// be the invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + if dec.More() { + return cty.NilType, fmt.Errorf("extraneous data after JSON object") + } + + return ty, nil +} + +func impliedType(dec *json.Decoder) (cty.Type, error) { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + return impliedTypeForTok(tok, dec) +} + +func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) { + if tok == nil { + return cty.DynamicPseudoType, nil + } + + switch ttok := tok.(type) { + case bool: + return cty.Bool, nil + + case json.Number: + return cty.Number, nil + + case string: + return cty.String, nil + + case json.Delim: + + switch rune(ttok) { + case '{': + return impliedObjectType(dec) + case '[': + return impliedTupleType(dec) + default: + return cty.NilType, fmt.Errorf("unexpected token %q", ttok) + } + + default: + return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok) + } +} + +func impliedObjectType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the { delimiter + // and so our next token should be the first object key. + + var atys map[string]cty.Type + + for { + // Read the object key first + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != '}' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + key, ok := tok.(string) + if !ok { + return cty.NilType, fmt.Errorf("expected string but found %T", tok) + } + + // Now read the value + tok, err = dec.Token() + if err != nil { + return cty.NilType, err + } + + aty, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[key] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the [ delimiter + // and so our next token should be the first value. 
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) == ']' { + break + } + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go new file mode 100644 index 00000000..5ad190d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := cty.ParseNumberVal(v) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read list value: %s", err) + } + + el, err := 
unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + objPath := path // some 
errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + 
dec.UseNumber() + return dec +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/value.go b/vendor/github.com/hashicorp/go-cty/cty/json/value.go new file mode 100644 index 00000000..50748f70 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/value.go @@ -0,0 +1,65 @@ +package json + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +// Marshal produces a JSON representation of the given value that can later +// be decoded into a value of the given type. +// +// A type is specified separately to allow for the given type to include +// cty.DynamicPseudoType to represent situations where any type is permitted +// and so type information must be included to allow recovery of the stored +// structure when decoding. +// +// The given type will also be used to attempt automatic conversions of any +// non-conformant types in the given value, although this will not always +// be possible. If the value cannot be made to be conformant then an error is +// returned, which may be a cty.PathError. +// +// Capsule-typed values can be marshalled, but with some caveats. Since +// capsule values are compared by pointer equality, it is impossible to recover +// a value that will compare equal to the original value. Additionally, +// it's not possible to JSON-serialize the capsule type itself, so it's not +// valid to use capsule types within parts of the value that are conformed to +// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as +// the encapsulated type itself is serializable with the Marshal function +// in encoding/json. +func Marshal(val cty.Value, t cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(t) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, t) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + buf := &bytes.Buffer{} + var path cty.Path + err := marshal(val, t, path, buf) + + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Unmarshal decodes a JSON representation of the given value into a cty Value +// conforming to the given type. +// +// While decoding, type conversions will be done where possible to make +// the result conformant even if the types given in JSON are not exactly +// correct. If conversion isn't possible then an error is returned, which +// may be a cty.PathError. +func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) { + var path cty.Path + return unmarshal(buf, t, path) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/list_type.go b/vendor/github.com/hashicorp/go-cty/cty/list_type.go new file mode 100644 index 00000000..2ef02a12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/list_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeList struct { + typeImplSigil + ElementTypeT Type +} + +// List creates a map type with the given element Type. +// +// List types are CollectionType implementations. +func List(elem Type) Type { + return Type{ + typeList{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a list whose element type is +// equal to that of the receiver. 
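+//
+// As a brief illustration (a sketch, not upstream documentation), two list
+// types compare as equal only when their element types do:
+//
+//     cty.List(cty.String).Equals(cty.List(cty.String)) // true
+//     cty.List(cty.String).Equals(cty.List(cty.Number)) // false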
+func (t typeList) Equals(other Type) bool {
+	ot, isList := other.typeImpl.(typeList)
+	if !isList {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeList) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "list of " + elemName
+}
+
+func (t typeList) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeList) GoString() string {
+	return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
+}
+
+// IsListType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsListType() bool {
+	_, ok := t.typeImpl.(typeList)
+	return ok
+}
+
+// ListElementType is a convenience method that checks if the given type is
+// a list type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.ListElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) ListElementType() *Type {
+	if lt, ok := t.typeImpl.(typeList); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/map_type.go b/vendor/github.com/hashicorp/go-cty/cty/map_type.go
new file mode 100644
index 00000000..82d36c62
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/map_type.go
@@ -0,0 +1,74 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// typeMap instances represent specific map types. Each distinct ElementType
+// creates a distinct, non-equal map type.
+type typeMap struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Map creates a map type with the given element Type.
+//
+// Map types are CollectionType implementations.
+func Map(elem Type) Type {
+	return Type{
+		typeMap{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a map whose element type is
+// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool {
+	ot, isMap := other.typeImpl.(typeMap)
+	if !isMap {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "map of " + elemName
+}
+
+func (t typeMap) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeMap) GoString() string {
+	return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
+}
+
+// IsMapType returns true if the given type is a map type, regardless of its
+// element type.
+func (t Type) IsMapType() bool {
+	_, ok := t.typeImpl.(typeMap)
+	return ok
+}
+
+// MapElementType is a convenience method that checks if the given type is
+// a map type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.MapElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) MapElementType() *Type {
+	if lt, ok := t.typeImpl.(typeMap); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/marks.go b/vendor/github.com/hashicorp/go-cty/cty/marks.go
new file mode 100644
index 00000000..3898e455
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/marks.go
@@ -0,0 +1,296 @@
+package cty
+
+import (
+	"fmt"
+	"strings"
+)
+
+// marker is an internal wrapper type used to add special "marks" to values.
+//
+// A "mark" is an annotation that can be used to represent additional
+// characteristics of values that propagate through operation methods to
+// result values. However, a marked value cannot be used with integration
+// methods normally associated with its type, in order to ensure that
+// calling applications don't inadvertently drop marks as they round-trip
+// values out of cty and back in again.
+//
+// Marked values are created only explicitly by the calling application, so
+// an application that never marks a value does not need to worry about
+// encountering marked values.
+type marker struct {
+	realV interface{}
+	marks ValueMarks
+}
+
+// ValueMarks is a map, representing a set, of "mark" values associated with
+// a Value. See Value.Mark for more information on the usage of mark values.
+type ValueMarks map[interface{}]struct{}
+
+// NewValueMarks constructs a new ValueMarks set with the given mark values.
+func NewValueMarks(marks ...interface{}) ValueMarks {
+	if len(marks) == 0 {
+		return nil
+	}
+	ret := make(ValueMarks, len(marks))
+	for _, v := range marks {
+		ret[v] = struct{}{}
+	}
+	return ret
+}
+
+// Equal returns true if the receiver and the given ValueMarks both contain
+// the same marks.
+func (m ValueMarks) Equal(o ValueMarks) bool {
+	if len(m) != len(o) {
+		return false
+	}
+	for v := range m {
+		if _, ok := o[v]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func (m ValueMarks) GoString() string {
+	var s strings.Builder
+	s.WriteString("cty.NewValueMarks(")
+	i := 0
+	for mv := range m {
+		if i != 0 {
+			s.WriteString(", ")
+		}
+		s.WriteString(fmt.Sprintf("%#v", mv))
+		i++
+	}
+	s.WriteString(")")
+	return s.String()
+}
+
+// IsMarked returns true if and only if the receiving value carries at least
+// one mark. A marked value cannot be used directly with integration methods
+// without explicitly unmarking it (and retrieving the markings) first.
+func (val Value) IsMarked() bool {
+	_, ok := val.v.(marker)
+	return ok
+}
+
+// HasMark returns true if and only if the receiving value has the given mark.
+func (val Value) HasMark(mark interface{}) bool {
+	if mr, ok := val.v.(marker); ok {
+		_, ok := mr.marks[mark]
+		return ok
+	}
+	return false
+}
+
+// ContainsMarked returns true if the receiving value or any value within it
+// is marked.
+//
+// This operation is relatively expensive. If you only need a shallow result,
+// use IsMarked instead.
+func (val Value) ContainsMarked() bool {
+	ret := false
+	Walk(val, func(_ Path, v Value) (bool, error) {
+		if v.IsMarked() {
+			ret = true
+			return false, nil
+		}
+		return true, nil
+	})
+	return ret
+}
+
+func (val Value) assertUnmarked() {
+	if val.IsMarked() {
+		panic("value is marked, so must be unmarked first")
+	}
+}
+
+// Marks returns a map (representing a set) of all of the mark values
+// associated with the receiving value, without changing the marks.
Returns nil +// if the value is not marked at all. +func (val Value) Marks() ValueMarks { + if mr, ok := val.v.(marker); ok { + // copy so that the caller can't mutate our internals + ret := make(ValueMarks, len(mr.marks)) + for k, v := range mr.marks { + ret[k] = v + } + return ret + } + return nil +} + +// HasSameMarks returns true if an only if the receiver and the given other +// value have identical marks. +func (val Value) HasSameMarks(other Value) bool { + vm, vmOK := val.v.(marker) + om, omOK := other.v.(marker) + if vmOK != omOK { + return false + } + if vmOK { + return vm.marks.Equal(om.marks) + } + return true +} + +// Mark returns a new value that as the same type and underlying value as +// the receiver but that also carries the given value as a "mark". +// +// Marks are used to carry additional application-specific characteristics +// associated with values. A marked value can be used with operation methods, +// in which case the marks are propagated to the operation results. A marked +// value _cannot_ be used with integration methods, so callers of those +// must derive an unmarked value using Unmark (and thus explicitly handle +// the markings) before calling the integration methods. +// +// The mark value can be any value that would be valid to use as a map key. +// The mark value should be of a named type in order to use the type itself +// as a namespace for markings. That type can be unexported if desired, in +// order to ensure that the mark can only be handled through the defining +// package's own functions. +// +// An application that never calls this method does not need to worry about +// handling marked values. +func (val Value) Mark(mark interface{}) Value { + var newMarker marker + newMarker.realV = val.v + if mr, ok := val.v.(marker); ok { + // It's already a marker, so we'll retain existing marks. + newMarker.marks = make(ValueMarks, len(mr.marks)+1) + for k, v := range mr.marks { + newMarker.marks[k] = v + } + } else { + // It's not a marker yet, so we're creating the first mark. + newMarker.marks = make(ValueMarks, 1) + } + newMarker.marks[mark] = struct{}{} + return Value{ + ty: val.ty, + v: newMarker, + } +} + +// Unmark separates the marks of the receiving value from the value itself, +// removing a new unmarked value and a map (representing a set) of the marks. +// +// If the receiver isn't marked, Unmark returns it verbatim along with a nil +// map of marks. +func (val Value) Unmark() (Value, ValueMarks) { + if !val.IsMarked() { + return val, nil + } + mr := val.v.(marker) + marks := val.Marks() // copy so that the caller can't mutate our internals + return Value{ + ty: val.ty, + v: mr.realV, + }, marks +} + +// UnmarkDeep is similar to Unmark, but it works with an entire nested structure +// rather than just the given value directly. +// +// The result is guaranteed to contain no nested values that are marked, and +// the returned marks set includes the superset of all of the marks encountered +// during the operation. +func (val Value) UnmarkDeep() (Value, ValueMarks) { + marks := make(ValueMarks) + ret, _ := Transform(val, func(_ Path, v Value) (Value, error) { + unmarkedV, valueMarks := v.Unmark() + for m, s := range valueMarks { + marks[m] = s + } + return unmarkedV, nil + }) + return ret, marks +} + +func (val Value) unmarkForce() Value { + unw, _ := val.Unmark() + return unw +} + +// WithMarks returns a new value that has the same type and underlying value +// as the receiver and also has the marks from the given maps (representing +// sets). 
+func (val Value) WithMarks(marks ...ValueMarks) Value { + if len(marks) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, s := range marks { + markCount += len(s) + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, s := range marks { + for m := range s { + newMarks[m] = struct{}{} + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} + +// WithSameMarks returns a new value that has the same type and underlying +// value as the receiver and also has the marks from the given source values. +// +// Use this if you are implementing your own higher-level operations against +// cty using the integration methods, to re-introduce the marks from the +// source values of the operation. +func (val Value) WithSameMarks(srcs ...Value) Value { + if len(srcs) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + markCount += len(mr.marks) + } + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + for m := range mr.marks { + newMarks[m] = struct{}{} + } + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go similarity index 95% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go index 1b631d0a..ce59d9ff 100644 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go @@ -3,8 +3,8 @@ package msgpack import ( "bytes" + "github.com/hashicorp/go-cty/cty" "github.com/vmihailenco/msgpack" - "github.com/zclconf/go-cty/cty" ) type dynamicVal struct { diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go new file mode 100644 index 00000000..8a43c16a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go @@ -0,0 +1,211 @@ +package msgpack + +import ( + "bytes" + "math/big" + "sort" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" + "github.com/vmihailenco/msgpack" +) + +// Marshal produces a msgpack serialization of the given value that +// can be decoded into the given type later using Unmarshal. +// +// The given value must conform to the given type, or an error will +// be returned. 
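+//
+// As a rough sketch of typical usage (the object type and value below are
+// purely illustrative):
+//
+//     ty := cty.Object(map[string]cty.Type{"name": cty.String})
+//     val := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("example")})
+//     buf, err := msgpack.Marshal(val, ty)
+//     if err != nil {
+//         // handle the conversion/encoding error
+//     }
+//     // buf now holds msgpack bytes that Unmarshal can decode with the same type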
+func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(ty) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, ty) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + var path cty.Path + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshal(val, ty, path, enc) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { + if val.IsMarked() { + return path.NewErrorf("value has marks, so it cannot be seralized") + } + + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, enc) + } + + if !val.IsKnown() { + err := enc.Encode(unknownVal) + if err != nil { + return path.NewError(err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return path.NewError(err) + } + return nil + } + + // The caller should've guaranteed that the given val is conformant with + // the given type ty, so we'll proceed under that assumption here. + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := 
append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. +func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 00000000..86664bac --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+ + code, err := dec.PeekCode() + if err != nil { + return cty.NilType, err + } + + switch { + + case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): + err := dec.Skip() + return cty.DynamicPseudoType, err + + case code == msgpackcodes.True || code == msgpackcodes.False: + _, err := dec.DecodeBool() + return cty.Bool, err + + case msgpackcodes.IsFixedNum(code): + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: + _, err := dec.DecodeUint64() + return cty.Number, err + + case code == msgpackcodes.Float || code == msgpackcodes.Double: + _, err := dec.DecodeFloat64() + return cty.Number, err + + case msgpackcodes.IsString(code): + _, err := dec.DecodeString() + return cty.String, err + + case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: + return impliedObjectType(dec) + + case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: + return impliedTupleType(dec) + + default: + return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) + } +} + +func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of map. + l, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + var atys map[string]cty.Type + + for i := 0; i < l; i++ { + // Read the map key first. We require maps to be strings, but msgpack + // doesn't so we're prepared to error here if not. + k, err := dec.DecodeString() + if err != nil { + return cty.DynamicPseudoType, err + } + + aty, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[k] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of array. + l, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + if l == 0 { + return cty.EmptyTuple, nil + } + + etys := make([]cty.Type, l) + + for i := 0; i < l; i++ { + ety, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + etys[i] = ety + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go similarity index 100% rename from vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go rename to vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 00000000..67f4c9a4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,334 @@ +package msgpack + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. 
+// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + dec.Skip() // skip what we've peeked + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + dec.Skip() // skip what we've peeked + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
+ peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + + if msgpackCodes.IsFixedNum(peek) { + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + } + + switch peek { + case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: + rv, err := dec.DecodeUint64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberUIntVal(rv), nil + case msgpackCodes.Float, msgpackCodes.Double: + rv, err := dec.DecodeFloat64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberFloatVal(rv), nil + default: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + v, err := cty.ParseNumberVal(rv) + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return v, nil + } + case cty.String: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("string is required") + } + return cty.StringVal(rv), nil + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a list is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.List(ety)), nil + case length == 0: + return cty.ListValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a set is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Set(ety)), nil + case length == 0: + return cty.SetValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a map is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Map(ety)), nil + case length == 0: + return cty.MapValEmpty(ety), nil + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + path[:len(path)-1].NewErrorf("non-string key in map") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + + val, err := unmarshal(dec, 
ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a tuple is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Tuple(etys)), nil + case length == 0: + return cty.TupleVal(nil), nil + case length != len(etys): + return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + ety := etys[i] + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("an object is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Object(atys)), nil + case length == 0: + return cty.ObjectVal(nil), nil + case length != len(atys): + return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", + len(atys), length) + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + aty, exists := atys[key] + if !exists { + return cty.DynamicVal, path.NewErrorf("unsupported attribute") + } + + val, err := unmarshal(dec, aty, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + switch { + case length == -1: + return cty.NullVal(cty.DynamicPseudoType), nil + case length != 2: + return cty.DynamicVal, path.NewErrorf( + "dynamic value array must have exactly two elements", + ) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + var ty cty.Type + err = (&ty).UnmarshalJSON(typeJSON) + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + return unmarshal(dec, ty, path) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/null.go b/vendor/github.com/hashicorp/go-cty/cty/null.go new file mode 100644 index 00000000..d58d0287 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/null.go @@ -0,0 +1,14 @@ +package cty + +// NullVal returns a null value of the given type. A null can be created of any +// type, but operations on such values will always panic. Calling applications +// are encouraged to use nulls only sparingly, particularly when user-provided +// expressions are to be evaluated, since the precence of nulls creates a +// much higher chance of evaluation errors that can't be caught by a type +// checker. 
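+//
+// As a minimal illustration, a null value still reports its type but carries
+// no underlying value:
+//
+//     v := cty.NullVal(cty.String)
+//     v.Type() == cty.String // true
+//     v.IsNull()             // true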
+func NullVal(t Type) Value { + return Value{ + ty: t, + v: nil, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/object_type.go b/vendor/github.com/hashicorp/go-cty/cty/object_type.go new file mode 100644 index 00000000..187d3875 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/object_type.go @@ -0,0 +1,135 @@ +package cty + +import ( + "fmt" +) + +type typeObject struct { + typeImplSigil + AttrTypes map[string]Type +} + +// Object creates an object type with the given attribute types. +// +// After a map is passed to this function the caller must no longer access it, +// since ownership is transferred to this library. +func Object(attrTypes map[string]Type) Type { + attrTypesNorm := make(map[string]Type, len(attrTypes)) + for k, v := range attrTypes { + attrTypesNorm[NormalizeString(k)] = v + } + + return Type{ + typeObject{ + AttrTypes: attrTypesNorm, + }, + } +} + +func (t typeObject) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeObject); ok { + if len(t.AttrTypes) != len(ot.AttrTypes) { + // Fast path: if we don't have the same number of attributes + // then we can't possibly be equal. This also avoids the need + // to test attributes in both directions below, since we know + // there can't be extras in "other". + return false + } + + for attr, ty := range t.AttrTypes { + oty, ok := ot.AttrTypes[attr] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write an object type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // an object type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "object" +} + +func (t typeObject) GoString() string { + if len(t.AttrTypes) == 0 { + return "cty.EmptyObject" + } + return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes) +} + +// EmptyObject is a shorthand for Object(map[string]Type{}), to more +// easily talk about the empty object type. +var EmptyObject Type + +// EmptyObjectVal is the only possible non-null, non-unknown value of type +// EmptyObject. +var EmptyObjectVal Value + +func init() { + EmptyObject = Object(map[string]Type{}) + EmptyObjectVal = Value{ + ty: EmptyObject, + v: map[string]interface{}{}, + } +} + +// IsObjectType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsObjectType() bool { + _, ok := t.typeImpl.(typeObject) + return ok +} + +// HasAttribute returns true if the receiver has an attribute with the given +// name, regardless of its type. Will panic if the reciever isn't an object +// type; use IsObjectType to determine whether this operation will succeed. +func (t Type) HasAttribute(name string) bool { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + _, hasAttr := ot.AttrTypes[name] + return hasAttr + } + panic("HasAttribute on non-object Type") +} + +// AttributeType returns the type of the attribute with the given name. Will +// panic if the receiver is not an object type (use IsObjectType to confirm) +// or if the object type has no such attribute (use HasAttribute to confirm). 
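+//
+// For illustration (the attribute name here is arbitrary), a cautious caller
+// can check before reading:
+//
+//     ty := cty.Object(map[string]cty.Type{"name": cty.String})
+//     if ty.HasAttribute("name") {
+//         aty := ty.AttributeType("name") // cty.String
+//         _ = aty
+//     }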
+func (t Type) AttributeType(name string) Type { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + aty, hasAttr := ot.AttrTypes[name] + if !hasAttr { + panic("no such attribute") + } + return aty + } + panic("AttributeType on non-object Type") +} + +// AttributeTypes returns a map from attribute names to their associated +// types. Will panic if the receiver is not an object type (use IsObjectType +// to confirm). +// +// The returned map is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the returned +// map. For many purposes the attribute-related methods of Value are more +// appropriate and more convenient to use. +func (t Type) AttributeTypes() map[string]Type { + if ot, ok := t.typeImpl.(typeObject); ok { + return ot.AttrTypes + } + panic("AttributeTypes on non-object Type") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/path.go b/vendor/github.com/hashicorp/go-cty/cty/path.go new file mode 100644 index 00000000..636e68c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/path.go @@ -0,0 +1,270 @@ +package cty + +import ( + "errors" + "fmt" +) + +// A Path is a sequence of operations to locate a nested value within a +// data structure. +// +// The empty Path represents the given item. Any PathSteps within represent +// taking a single step down into a data structure. +// +// Path has some convenience methods for gradually constructing a path, +// but callers can also feel free to just produce a slice of PathStep manually +// and convert to this type, which may be more appropriate in environments +// where memory pressure is a concern. +// +// Although a Path is technically mutable, by convention callers should not +// mutate a path once it has been built and passed to some other subsystem. +// Instead, use Copy and then mutate the copy before using it. +type Path []PathStep + +// PathStep represents a single step down into a data structure, as part +// of a Path. PathStep is a closed interface, meaning that the only +// permitted implementations are those within this package. +type PathStep interface { + pathStepSigil() pathStepImpl + Apply(Value) (Value, error) +} + +// embed pathImpl into a struct to declare it a PathStep implementation +type pathStepImpl struct{} + +func (p pathStepImpl) pathStepSigil() pathStepImpl { + return p +} + +// Index returns a new Path that is the reciever with an IndexStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) Index(v Value) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = IndexStep{ + Key: v, + } + return ret +} + +// IndexInt is a typed convenience method for Index. +func (p Path) IndexInt(v int) Path { + return p.Index(NumberIntVal(int64(v))) +} + +// IndexString is a typed convenience method for Index. +func (p Path) IndexString(v string) Path { + return p.Index(StringVal(v)) +} + +// IndexPath is a convenience method to start a new Path with an IndexStep. +func IndexPath(v Value) Path { + return Path{}.Index(v) +} + +// IndexIntPath is a typed convenience method for IndexPath. +func IndexIntPath(v int) Path { + return IndexPath(NumberIntVal(int64(v))) +} + +// IndexStringPath is a typed convenience method for IndexPath. 
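+//
+// As a small sketch, the path constructors and builder methods can be chained
+// to describe a nested location such as foo["bar"][0] (names are illustrative):
+//
+//     p := cty.GetAttrPath("foo").IndexString("bar").IndexInt(0)
+//     // p now has three steps: a GetAttrStep followed by two IndexSteps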
+func IndexStringPath(v string) Path { + return IndexPath(StringVal(v)) +} + +// GetAttr returns a new Path that is the reciever with a GetAttrStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) GetAttr(name string) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = GetAttrStep{ + Name: name, + } + return ret +} + +// Equals compares 2 Paths for exact equality. +func (p Path) Equals(other Path) bool { + if len(p) != len(other) { + return false + } + + for i := range p { + pv := p[i] + switch pv := pv.(type) { + case GetAttrStep: + ov, ok := other[i].(GetAttrStep) + if !ok || pv != ov { + return false + } + case IndexStep: + ov, ok := other[i].(IndexStep) + if !ok { + return false + } + + if !pv.Key.RawEquals(ov.Key) { + return false + } + default: + // Any invalid steps default to evaluating false. + return false + } + } + + return true + +} + +// HasPrefix determines if the path p contains the provided prefix. +func (p Path) HasPrefix(prefix Path) bool { + if len(prefix) > len(p) { + return false + } + + return p[:len(prefix)].Equals(prefix) +} + +// GetAttrPath is a convenience method to start a new Path with a GetAttrStep. +func GetAttrPath(name string) Path { + return Path{}.GetAttr(name) +} + +// Apply applies each of the steps in turn to successive values starting with +// the given value, and returns the result. If any step returns an error, +// the whole operation returns an error. +func (p Path) Apply(val Value) (Value, error) { + var err error + for i, step := range p { + val, err = step.Apply(val) + if err != nil { + return NilVal, fmt.Errorf("at step %d: %s", i, err) + } + } + return val, nil +} + +// LastStep applies the given path up to the last step and then returns +// the resulting value and the final step. +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. +func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the value that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. 
+// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. +func (s IndexStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot index a null value") + } + + switch s.Key.Type() { + case Number: + if !(val.Type().IsListType() || val.Type().IsTupleType()) { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot access attributes on a null value") + } + + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/path_set.go b/vendor/github.com/hashicorp/go-cty/cty/path_set.go new file mode 100644 index 00000000..977523de --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/path_set.go @@ -0,0 +1,198 @@ +package cty + +import ( + "fmt" + "hash/crc64" + + "github.com/hashicorp/go-cty/cty/set" +) + +// PathSet represents a set of Path objects. This can be used, for example, +// to talk about a subset of paths within a value that meet some criteria, +// without directly modifying the values at those paths. +type PathSet struct { + set set.Set +} + +// NewPathSet creates and returns a PathSet, with initial contents optionally +// set by the given arguments. +func NewPathSet(paths ...Path) PathSet { + ret := PathSet{ + set: set.NewSet(pathSetRules{}), + } + + for _, path := range paths { + ret.Add(path) + } + + return ret +} + +// Add inserts a single given path into the set. +// +// Paths are immutable after construction by convention. It is particularly +// important not to mutate a path after it has been placed into a PathSet. +// If a Path is mutated while in a set, behavior is undefined. +func (s PathSet) Add(path Path) { + s.set.Add(path) +} + +// AddAllSteps is like Add but it also adds all of the steps leading to +// the given path. +// +// For example, if given a path representing "foo.bar", it will add both +// "foo" and "bar". +func (s PathSet) AddAllSteps(path Path) { + for i := 1; i <= len(path); i++ { + s.Add(path[:i]) + } +} + +// Has returns true if the given path is in the receiving set. 
+func (s PathSet) Has(path Path) bool { + return s.set.Has(path) +} + +// List makes and returns a slice of all of the paths in the receiving set, +// in an undefined but consistent order. +func (s PathSet) List() []Path { + if s.Empty() { + return nil + } + ret := make([]Path, 0, s.set.Length()) + for it := s.set.Iterator(); it.Next(); { + ret = append(ret, it.Value().(Path)) + } + return ret +} + +// Remove modifies the receving set to no longer include the given path. +// If the given path was already absent, this is a no-op. +func (s PathSet) Remove(path Path) { + s.set.Remove(path) +} + +// Empty returns true if the length of the receiving set is zero. +func (s PathSet) Empty() bool { + return s.set.Length() == 0 +} + +// Union returns a new set whose contents are the union of the receiver and +// the given other set. +func (s PathSet) Union(other PathSet) PathSet { + return PathSet{ + set: s.set.Union(other.set), + } +} + +// Intersection returns a new set whose contents are the intersection of the +// receiver and the given other set. +func (s PathSet) Intersection(other PathSet) PathSet { + return PathSet{ + set: s.set.Intersection(other.set), + } +} + +// Subtract returns a new set whose contents are those from the receiver with +// any elements of the other given set subtracted. +func (s PathSet) Subtract(other PathSet) PathSet { + return PathSet{ + set: s.set.Subtract(other.set), + } +} + +// SymmetricDifference returns a new set whose contents are the symmetric +// difference of the receiver and the given other set. +func (s PathSet) SymmetricDifference(other PathSet) PathSet { + return PathSet{ + set: s.set.SymmetricDifference(other.set), + } +} + +// Equal returns true if and only if both the receiver and the given other +// set contain exactly the same paths. +func (s PathSet) Equal(other PathSet) bool { + if s.set.Length() != other.set.Length() { + return false + } + // Now we know the lengths are the same we only need to test in one + // direction whether everything in one is in the other. + for it := s.set.Iterator(); it.Next(); { + if !other.set.Has(it.Value()) { + return false + } + } + return true +} + +var crc64Table = crc64.MakeTable(crc64.ISO) + +var indexStepPlaceholder = []byte("#") + +// pathSetRules is an implementation of set.Rules from the set package, +// used internally within PathSet. +type pathSetRules struct { +} + +func (r pathSetRules) Hash(v interface{}) int { + path := v.(Path) + hash := crc64.New(crc64Table) + + for _, rawStep := range path { + switch step := rawStep.(type) { + case GetAttrStep: + // (this creates some garbage converting the string name to a + // []byte, but that's okay since cty is not designed to be + // used in tight loops under memory pressure.) + hash.Write([]byte(step.Name)) + default: + // For any other step type we just append a predefined value, + // which means that e.g. all indexes into a given collection will + // hash to the same value but we assume that collections are + // small and thus this won't hurt too much. + hash.Write(indexStepPlaceholder) + } + } + + // We discard half of the hash on 32-bit platforms; collisions just make + // our lookups take marginally longer, so not a big deal. 
+ return int(hash.Sum64()) +} + +func (r pathSetRules) Equivalent(a, b interface{}) bool { + aPath := a.(Path) + bPath := b.(Path) + + if len(aPath) != len(bPath) { + return false + } + + for i := range aPath { + switch aStep := aPath[i].(type) { + case GetAttrStep: + bStep, ok := bPath[i].(GetAttrStep) + if !ok { + return false + } + + if aStep.Name != bStep.Name { + return false + } + case IndexStep: + bStep, ok := bPath[i].(IndexStep) + if !ok { + return false + } + + eq := aStep.Key.Equals(bStep.Key) + if !eq.IsKnown() || eq.False() { + return false + } + default: + // Should never happen, since we document PathStep as a closed type. + panic(fmt.Errorf("unsupported step type %T", aStep)) + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go new file mode 100644 index 00000000..7b3d1196 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package. +type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. 
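The primitive types and the predefined values (True, False, Zero, and the infinities) behave as ordinary cty values. A short illustrative sketch, not vendored code; GreaterThan is assumed from the wider cty value API rather than this hunk:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	fmt.Println(cty.True.Type().Equals(cty.Bool))             // true
	fmt.Println(cty.Zero.Equals(cty.NumberIntVal(0)).True())  // true
	fmt.Println(cty.Number.IsPrimitiveType())                 // true
	fmt.Println(cty.Set(cty.String).IsPrimitiveType())        // false

	// The infinities are ordinary Number values and compare as expected.
	fmt.Println(cty.PositiveInfinity.GreaterThan(cty.Zero).True()) // true
}
```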
+var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the reciever is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/gob.go b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go new file mode 100644 index 00000000..da2978f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. +// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. 
+func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go new file mode 100644 index 00000000..4a60494f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go @@ -0,0 +1,15 @@ +package set + +type Iterator struct { + vals []interface{} + idx int +} + +func (it *Iterator) Value() interface{} { + return it.vals[it.idx] +} + +func (it *Iterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/ops.go b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go new file mode 100644 index 00000000..fd1555f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go @@ -0,0 +1,210 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set. If the set's rules +// implement OrderedRules then the result is ordered per those rules. If +// no order is provided, or if it is not a total order, then the iteration +// order is undefined but consistent for a particular version of cty. 
Do not +// rely on specific ordering between cty releases unless the rules order is a +// total order. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + vals := s.Values() + + return &Iterator{ + vals: vals, + idx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all the values in the set. If the set rules have +// an order then the result is in that order. If no order is provided or if +// it is not a total order then the result order is undefined, but consistent +// for a particular set value within a specific release of cty. +func (s Set) Values() []interface{} { + var ret []interface{} + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIDs := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIDs = append(bucketIDs, id) + } + sort.Ints(bucketIDs) + + for _, bucketID := range bucketIDs { + ret = append(ret, s.vals[bucketID]...) + } + + if orderRules, ok := s.rules.(OrderedRules); ok { + sort.SliceStable(ret, func(i, j int) bool { + return orderRules.Less(ret[i], ret[j]) + }) + } + + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. 
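These operations are only meaningful relative to a Rules implementation supplied by the caller. A sketch, not part of the vendored package, with a hypothetical intRules type written just for illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty/set"
)

// intRules is a hypothetical Rules implementation for plain int elements,
// written only to demonstrate the set API.
type intRules struct{}

func (intRules) Hash(v interface{}) int           { return v.(int) }
func (intRules) Equivalent(a, b interface{}) bool { return a.(int) == b.(int) }

func main() {
	a := set.NewSet(intRules{})
	b := set.NewSet(intRules{})
	for _, n := range []int{1, 2, 3} {
		a.Add(n)
	}
	for _, n := range []int{3, 4} {
		b.Add(n)
	}

	fmt.Println(a.Has(2))                   // true
	fmt.Println(a.Union(b).Length())        // 4
	fmt.Println(a.Intersection(b).Values()) // [3]
	fmt.Println(a.Subtract(b).Length())     // 2
}
```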
+func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/rules.go b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go new file mode 100644 index 00000000..51f744b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go @@ -0,0 +1,43 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} + +// OrderedRules is an extension of Rules that can apply a partial order to +// element values. When a set's Rules implements OrderedRules an iterator +// over the set will return items in the order described by the rules. +// +// If the given order is not a total order (that is, some pairs of non-equivalent +// elements do not have a defined order) then the resulting iteration order +// is undefined but consistent for a particular version of cty. The exact +// order in that case is not part of the contract and is subject to change +// between versions. +type OrderedRules interface { + Rules + + // Less returns true if and only if the first argument should sort before + // the second argument. If the second argument should sort before the first + // or if there is no defined order for the values, return false. + Less(interface{}, interface{}) bool +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/set.go b/vendor/github.com/hashicorp/go-cty/cty/set/set.go new file mode 100644 index 00000000..b4fb316f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. +// +// Set operations are not optimized to minimize memory pressure. Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. 
+type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. +func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_helper.go b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go new file mode 100644 index 00000000..31622842 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go @@ -0,0 +1,132 @@ +package cty + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ValueSet is to cty.Set what []cty.Value is to cty.List and +// map[string]cty.Value is to cty.Map. It's provided to allow callers a +// convenient interface for manipulating sets before wrapping them in cty.Set +// values using cty.SetValFromValueSet. +// +// Unlike value slices and value maps, ValueSet instances have a single +// homogenous element type because that is a requirement of the underlying +// set implementation, which uses the element type to select a suitable +// hashing function. +// +// Set mutations are not concurrency-safe. +type ValueSet struct { + // ValueSet is just a thin wrapper around a set.Set with our value-oriented + // "rules" applied. We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. 
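A rough usage sketch, not vendored code: a ValueSet is built up imperatively and then wrapped into a real set value with SetValFromValueSet, which is added later in this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	vs := cty.NewValueSet(cty.String)
	vs.Add(cty.StringVal("a"))
	vs.Add(cty.StringVal("b"))
	vs.Add(cty.StringVal("a")) // duplicate: no effect

	fmt.Println(vs.Length())                // 2
	fmt.Println(vs.Has(cty.StringVal("b"))) // true

	// Wrap the ValueSet into a real cty value of type set(string).
	setVal := cty.SetValFromValueSet(vs)
	fmt.Println(setVal.Type().GoString()) // cty.Set(cty.String)
	fmt.Println(setVal.LengthInt())       // 2
}
```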
+func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. +// +// It also panics if the given value is marked, because marked values cannot +// be stored in sets. +func (s ValueSet) requireElementType(v Value) { + if v.IsMarked() { + panic("cannot store marked value directly in a set (make the set itself unknown instead)") + } + if !v.Type().Equals(s.ElementType()) { + panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType())) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_internals.go b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go new file mode 100644 index 00000000..40801980 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go @@ -0,0 +1,244 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +var _ set.OrderedRules = setRules{} + +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. 
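A small sketch, not vendored code, of the guarantee described above: equivalent values always hash identically, while a matching hash alone never proves equality:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	a := cty.ObjectVal(map[string]cty.Value{"port": cty.NumberIntVal(443)})
	b := cty.ObjectVal(map[string]cty.Value{"port": cty.NumberIntVal(443)})

	// Equal values are guaranteed to hash identically...
	fmt.Println(a.Equals(b).True())   // true
	fmt.Println(a.Hash() == b.Hash()) // true

	// ...but distinct values may collide, so Hash is only a bucketing aid.
	c := cty.ObjectVal(map[string]cty.Value{"port": cty.NumberIntVal(80)})
	fmt.Println(a.Equals(c).True()) // false
}
```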
+func (val Value) Hash() int { + hashBytes, marks := makeSetHashBytes(val) + if len(marks) > 0 { + panic("can't take hash of value that has marks or has embedded values that have marks") + } + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Hash(v interface{}) int { + return Value{ + ty: r.Type, + v: v, + }.Hash() +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +// Less is an implementation of set.OrderedRules so that we can iterate over +// set elements in a consistent order, where such an order is possible. +func (r setRules) Less(v1, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less + return false + } + + // Null values always sort after non-null values + if v2v.IsNull() && !v1v.IsNull() { + return true + } else if v1v.IsNull() { + return false + } + // Unknown values always sort after known values + if v1v.IsKnown() && !v2v.IsKnown() { + return true + } else if !v1v.IsKnown() { + return false + } + + switch r.Type { + case String: + // String values sort lexicographically + return v1v.AsString() < v2v.AsString() + case Bool: + // Weird to have a set of bools, but if we do then false sorts before true. + if v2v.True() || !v1v.True() { + return true + } + return false + case Number: + v1f := v1v.AsBigFloat() + v2f := v2v.AsBigFloat() + return v1f.Cmp(v2f) < 0 + default: + // No other types have a well-defined ordering, so we just produce a + // default consistent-but-undefined ordering then. This situation is + // not considered a compatibility constraint; callers should rely only + // on the ordering rules for primitive values. + v1h, _ := makeSetHashBytes(v1v) + v2h, _ := makeSetHashBytes(v2v) + return bytes.Compare(v1h, v2h) < 0 + } +} + +func makeSetHashBytes(val Value) ([]byte, ValueMarks) { + var buf bytes.Buffer + marks := make(ValueMarks) + appendSetHashBytes(val, &buf, marks) + return buf.Bytes(), marks +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support hetrogenous lists we don't need to worry about + // collisions between values of different types, apart from + // PseudoTypeDynamic. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding. + + // Marks aren't considered part of a value for equality-testing purposes, + // so we'll unmark our value before we work with it but we'll remember + // the marks in case the caller needs to re-apply them to a derived + // value. 
+ if val.IsMarked() { + unmarkedVal, valMarks := val.Unmark() + for m := range valMarks { + marks[m] = struct{}{} + } + val = unmarkedVal + } + + if !val.IsKnown() { + buf.WriteRune('?') + return + } + if val.IsNull() { + buf.WriteRune('~') + return + } + + switch val.ty { + case Number: + // Due to an unfortunate quirk of gob encoding for big.Float, we end up + // with non-pointer values immediately after a gob round-trip, and + // we end up in here before we've had a chance to run + // gobDecodeFixNumberPtr on the inner values of a gob-encoded set, + // and so sadly we must make a special effort to handle that situation + // here just so that we can get far enough along to fix it up for + // everything else in this package. + if bf, ok := val.v.(big.Float); ok { + buf.WriteString(bf.String()) + return + } + buf.WriteString(val.v.(*big.Float).String()) + return + case Bool: + if val.v.(bool) { + buf.WriteRune('T') + } else { + buf.WriteRune('F') + } + return + case String: + buf.WriteString(fmt.Sprintf("%q", val.v.(string))) + return + } + + if val.ty.IsMapType() { + buf.WriteRune('{') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(keyVal, buf, marks) + buf.WriteRune(':') + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune('}') + return + } + + if val.ty.IsListType() || val.ty.IsSetType() { + buf.WriteRune('[') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune(']') + return + } + + if val.ty.IsObjectType() { + buf.WriteRune('<') + attrNames := make([]string, 0, len(val.ty.AttributeTypes())) + for attrName := range val.ty.AttributeTypes() { + attrNames = append(attrNames, attrName) + } + sort.Strings(attrNames) + for _, attrName := range attrNames { + appendSetHashBytes(val.GetAttr(attrName), buf, marks) + buf.WriteRune(';') + } + buf.WriteRune('>') + return + } + + if val.ty.IsTupleType() { + buf.WriteRune('<') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune('>') + return + } + + // should never get down here + panic("unsupported type in set hash") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_type.go b/vendor/github.com/hashicorp/go-cty/cty/set_type.go new file mode 100644 index 00000000..cbc3706f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_type.go @@ -0,0 +1,72 @@ +package cty + +import ( + "fmt" +) + +type typeSet struct { + typeImplSigil + ElementTypeT Type +} + +// Set creates a set type with the given element Type. +// +// Set types are CollectionType implementations. +func Set(elem Type) Type { + return Type{ + typeSet{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a set whose element type is +// equal to that of the receiver. 
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
new file mode 100644
index 00000000..798cacd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element type.
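To make the tuple API concrete, an illustrative sketch (not part of the vendored file) that builds a tuple value with TupleVal, added later in this diff, and inspects its type:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	v := cty.TupleVal([]cty.Value{
		cty.StringVal("web"),
		cty.NumberIntVal(8080),
		cty.True,
	})

	ty := v.Type()
	fmt.Println(ty.IsTupleType()) // true
	fmt.Println(ty.Length())      // 3
	fmt.Println(ty.Equals(cty.Tuple([]cty.Type{cty.String, cty.Number, cty.Bool}))) // true

	fmt.Println(cty.EmptyTupleVal.Type().Equals(cty.EmptyTuple)) // true
}
```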
+func (t Type) IsTupleType() bool { + _, ok := t.typeImpl.(typeTuple) + return ok +} + +// Length returns the number of elements of the receiving tuple type. +// Will panic if the reciever isn't a tuple type; use IsTupleType to determine +// whether this operation will succeed. +func (t Type) Length() int { + if ot, ok := t.typeImpl.(typeTuple); ok { + return len(ot.ElemTypes) + } + panic("Length on non-tuple Type") +} + +// TupleElementType returns the type of the element with the given index. Will +// panic if the receiver is not a tuple type (use IsTupleType to confirm) +// or if the index is out of range (use Length to confirm). +func (t Type) TupleElementType(idx int) Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes[idx] + } + panic("TupleElementType on non-tuple Type") +} + +// TupleElementTypes returns a slice of the recieving tuple type's element +// types. Will panic if the receiver is not a tuple type (use IsTupleType +// to confirm). +// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/type.go b/vendor/github.com/hashicorp/go-cty/cty/type.go new file mode 100644 index 00000000..730cb986 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/type.go @@ -0,0 +1,120 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName(mode friendlyTypeNameMode) string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. 
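An illustrative sketch, not vendored code, contrasting the two naming modes using only types defined in this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	fmt.Println(cty.String.FriendlyName())            // "string"
	fmt.Println(cty.Set(cty.Number).FriendlyName())   // "set of number"
	fmt.Println(cty.DynamicPseudoType.FriendlyName()) // "dynamic"

	// The constraint-oriented variant names the dynamic pseudo-type the way
	// an end user would read a type constraint.
	fmt.Println(cty.DynamicPseudoType.FriendlyNameForConstraint())           // "any type"
	fmt.Println(cty.Set(cty.DynamicPseudoType).FriendlyNameForConstraint())  // "set of any single type"
}
```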
+func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. +func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendent elements +// are DynamicPseudoType. +func (t Type) HasDynamicTypes() bool { + switch { + case t == DynamicPseudoType: + return true + case t.IsPrimitiveType(): + return false + case t.IsCollectionType(): + return false + case t.IsObjectType(): + attrTypes := t.AttributeTypes() + for _, at := range attrTypes { + if at.HasDynamicTypes() { + return true + } + } + return false + case t.IsTupleType(): + elemTypes := t.TupleElementTypes() + for _, et := range elemTypes { + if et.HasDynamicTypes() { + return true + } + } + return false + case t.IsCapsuleType(): + return false + default: + // Should never happen, since above should be exhaustive + panic("HasDynamicTypes does not support the given type") + } +} + +type friendlyTypeNameMode rune + +const ( + friendlyTypeName friendlyTypeNameMode = 'N' + friendlyTypeConstraintName friendlyTypeNameMode = 'C' +) diff --git a/vendor/github.com/hashicorp/go-cty/cty/type_conform.go b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go new file mode 100644 index 00000000..476eeea8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go @@ -0,0 +1,139 @@ +package cty + +// TestConformance recursively walks the receiver and the given other type and +// returns nil if the receiver *conforms* to the given type. +// +// Type conformance is similar to type equality but has one crucial difference: +// PseudoTypeDynamic can be used within the given type to represent that +// *any* type is allowed. +// +// If any non-conformities are found, the returned slice will be non-nil and +// contain at least one error value. It will be nil if the type is entirely +// conformant. +// +// Note that the special behavior of PseudoTypeDynamic is the *only* exception +// to normal type equality. Calling applications may wish to apply their own +// automatic conversion logic to the given data structure to create a more +// liberal notion of conformance to a type. +// +// Returned errors are usually (but not always) PathError instances that +// indicate where in the structure the error was found. If a returned error +// is of that type then the error message is written for (English-speaking) +// end-users working within the cty type system, not mentioning any Go-oriented +// implementation details. +func (t Type) TestConformance(other Type) []error { + path := make(Path, 0) + var errs []error + testConformance(t, other, path, &errs) + return errs +} + +func testConformance(given Type, want Type, path Path, errs *[]error) { + if want.Equals(DynamicPseudoType) { + // anything goes! 
+ return + } + + if given.Equals(want) { + // Any equal types are always conformant + return + } + + // The remainder of this function is concerned with detecting + // and reporting the specific non-conformance, since we wouldn't + // have got here if the types were not divergent. + // We treat compound structures as special so that we can report + // specifically what is non-conforming, rather than simply returning + // the entire type names and letting the user puzzle it out. + + if given.IsObjectType() && want.IsObjectType() { + givenAttrs := given.AttributeTypes() + wantAttrs := want.AttributeTypes() + + for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go new file mode 100644 index 00000000..ec05bb18 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/hashicorp/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. +// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. 
However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. +var InternalTypesToRegister []interface{} + +func init() { + InternalTypesToRegister = []interface{}{ + primitiveType{}, + typeList{}, + typeMap{}, + typeObject{}, + typeSet{}, + setRules{}, + set.Set{}, + typeTuple{}, + big.Float{}, + capsuleType{}, + []interface{}(nil), + map[string]interface{}(nil), + } + + // Register these with gob here, rather than in gob.go, to ensure + // that this will always happen after we build the above. + for _, tv := range InternalTypesToRegister { + typeName := fmt.Sprintf("%T", tv) + if strings.HasPrefix(typeName, "cty.") { + gob.RegisterName(fmt.Sprintf("github.com/hashicorp/go-cty/%s", typeName), tv) + } else { + gob.Register(tv) + } + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/unknown.go new file mode 100644 index 00000000..e54179eb --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/unknown.go @@ -0,0 +1,84 @@ +package cty + +// unknownType is the placeholder type used for the sigil value representing +// "Unknown", to make it unambigiously distinct from any other possible value. +type unknownType struct { +} + +// Unknown is a special value that can be +var unknown interface{} = &unknownType{} + +// UnknownVal returns an Value that represents an unknown value of the given +// type. Unknown values can be used to represent a value that is +// not yet known. Its meaning is undefined in cty, but it could be used by +// an calling application to allow partial evaluation. +// +// Unknown values of any type can be created of any type. All operations on +// Unknown values themselves return Unknown. +func UnknownVal(t Type) Value { + return Value{ + ty: t, + v: unknown, + } +} + +func (t unknownType) GoString() string { + // This is the stringification of our internal unknown marker. The + // stringification of the public representation of unknowns is in + // Value.GoString. + return "cty.unknown" +} + +type pseudoTypeDynamic struct { + typeImplSigil +} + +// DynamicPseudoType represents the dynamic pseudo-type. +// +// This type can represent situations where a type is not yet known. Its +// meaning is undefined in cty, but it could be used by a calling +// application to allow expression type checking with some types not yet known. +// For example, the application might optimistically permit any operation on +// values of this type in type checking, allowing a partial type-check result, +// and then repeat the check when more information is known to get the +// final, concrete type. +// +// It is a pseudo-type because it is used only as a sigil to the calling +// application. "Unknown" is the only valid value of this pseudo-type, so +// operations on values of this type will always short-circuit as per +// the rules for that special value. 
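A minimal sketch, not vendored code, of DynamicPseudoType acting as the "anything goes" wildcard in TestConformance from earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// "want" allows any type at all for the "meta" attribute.
	want := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"meta": cty.DynamicPseudoType,
	})

	ok := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"meta": cty.List(cty.Bool),
	})
	fmt.Println(len(ok.TestConformance(want))) // 0: conformant

	bad := cty.Object(map[string]cty.Type{
		"name": cty.Number,
		"meta": cty.Bool,
	})
	for _, err := range bad.TestConformance(want) {
		fmt.Println(err) // reports that "name" must be string, not number
	}
}
```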
+var DynamicPseudoType Type + +func (t pseudoTypeDynamic) Equals(other Type) bool { + _, ok := other.typeImpl.(pseudoTypeDynamic) + return ok +} + +func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string { + switch mode { + case friendlyTypeConstraintName: + return "any type" + default: + return "dynamic" + } +} + +func (t pseudoTypeDynamic) GoString() string { + return "cty.DynamicPseudoType" +} + +// DynamicVal is the only valid value of the pseudo-type dynamic. +// This value can be used as a placeholder where a value or expression's +// type and value are both unknown, thus allowing partial evaluation. See +// the docs for DynamicPseudoType for more information. +var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go new file mode 100644 index 00000000..ba926475 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. +func UnknownAsNull(val Value) Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return NullVal(ty) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make([]Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, UnknownAsNull(v)) + } + switch { + case ty.IsListType(): + return ListVal(vals) + case ty.IsTupleType(): + return TupleVal(vals) + default: + return SetVal(vals) + } + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make(map[string]Value, length) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vals[k.AsString()] = UnknownAsNull(v) + } + switch { + case ty.IsMapType(): + return MapVal(vals) + default: + return ObjectVal(vals) + } + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value.go b/vendor/github.com/hashicorp/go-cty/cty/value.go new file mode 100644 index 00000000..1025ba82 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value.go @@ -0,0 +1,108 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. Operation methods stay entirely +// within the type system (methods accept and return Value instances) and +// are intended for use in implementing a language in terms of cty, while +// integration methods either enter or leave the type system, working with +// native Go values. 
Operation methods are guaranteed to support all of the +// expected short-circuit behavior for unknown and dynamic values, while +// integration methods may not. +// +// The philosophy for the operations API is that it's the caller's +// responsibility to ensure that the given types and values satisfy the +// specified invariants during a separate type check, so that the caller is +// able to return errors to its user from the application's own perspective. +// +// Consequently the design of these methods assumes such checks have already +// been done and panics if any invariants turn out not to be satisfied. These +// panic errors are not intended to be handled, but rather indicate a bug in +// the calling application that should be fixed with more checks prior to +// executing operations. +// +// A related consequence of this philosophy is that no automatic type +// conversions are done. If a method specifies that its argument must be +// number then it's the caller's responsibility to do that conversion before +// the call, thus allowing the application to have more constrained conversion +// rules than are offered by the built-in converter where necessary. +type Value struct { + ty Type + v interface{} +} + +// Type returns the type of the value. +func (val Value) Type() Type { + return val.ty +} + +// IsKnown returns true if the value is known. That is, if it is not +// the result of the unknown value constructor Unknown(...), and is not +// the result of an operation on another unknown value. +// +// Unknown values are only produced either directly or as a result of +// operating on other unknown values, and so an application that never +// introduces Unknown values can be guaranteed to never receive any either. +func (val Value) IsKnown() bool { + if val.IsMarked() { + return val.unmarkForce().IsKnown() + } + return val.v != unknown +} + +// IsNull returns true if the value is null. Values of any type can be +// null, but any operations on a null value will panic. No operation ever +// produces null, so an application that never introduces Null values can +// be guaranteed to never receive any either. +func (val Value) IsNull() bool { + if val.IsMarked() { + return val.unmarkForce().IsNull() + } + return val.v == nil +} + +// NilVal is an invalid Value that can be used as a placeholder when returning +// with an error from a function that returns (Value, error). +// +// NilVal is *not* a valid error and so no operations may be performed on it. +// Any attempt to use it will result in a panic. +// +// This should not be confused with the idea of a Null value, as returned by +// NullVal. NilVal is a nil within the *Go* type system, and is invalid in +// the cty type system. Null values *do* exist in the cty type system. +var NilVal = Value{ + ty: Type{typeImpl: nil}, + v: nil, +} + +// IsWhollyKnown is an extension of IsKnown that also recursively checks +// inside collections and structures to see if there are any nested unknown +// values. 
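An illustrative sketch, not vendored code, of the distinction between IsKnown and IsWhollyKnown documented above:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// A known list that contains an unknown element.
	l := cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.UnknownVal(cty.String),
	})

	fmt.Println(l.IsKnown())       // true: the list itself is known
	fmt.Println(l.IsWhollyKnown()) // false: one nested element is unknown

	fmt.Println(cty.NullVal(cty.String).IsNull())     // true
	fmt.Println(cty.UnknownVal(cty.Number).IsKnown()) // false
	fmt.Println(cty.DynamicVal.IsKnown())             // false
}
```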
+func (val Value) IsWhollyKnown() bool { + if val.IsMarked() { + return val.unmarkForce().IsWhollyKnown() + } + + if !val.IsKnown() { + return false + } + + if val.IsNull() { + // Can't recurse into a null, so we're done + return true + } + + switch { + case val.CanIterateElements(): + for it := val.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsWhollyKnown() { + return false + } + } + return true + default: + return true + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_init.go b/vendor/github.com/hashicorp/go-cty/cty/value_init.go new file mode 100644 index 00000000..853a5a7d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value_init.go @@ -0,0 +1,324 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "golang.org/x/text/unicode/norm" + + "github.com/hashicorp/go-cty/cty/set" +) + +// BoolVal returns a Value of type Number whose internal value is the given +// bool. +func BoolVal(v bool) Value { + return Value{ + ty: Bool, + v: v, + } +} + +// NumberVal returns a Value of type Number whose internal value is the given +// big.Float. The returned value becomes the owner of the big.Float object, +// and so it's forbidden for the caller to mutate the object after it's +// wrapped in this way. +func NumberVal(v *big.Float) Value { + return Value{ + ty: Number, + v: v, + } +} + +// ParseNumberVal returns a Value of type number produced by parsing the given +// string as a decimal real number. To ensure that two identical strings will +// always produce an equal number, always use this function to derive a number +// from a string; it will ensure that the precision and rounding mode for the +// internal big decimal is configured in a consistent way. +// +// If the given string cannot be parsed as a number, the returned error has +// the message "a number is required", making it suitable to return to an +// end-user to signal a type conversion error. +// +// If the given string contains a number that becomes a recurring fraction +// when expressed in binary then it will be truncated to have a 512-bit +// mantissa. Note that this is a higher precision than that of a float64, +// so coverting the same decimal number first to float64 and then calling +// NumberFloatVal will not produce an equal result; the conversion first +// to float64 will round the mantissa to fewer than 512 bits. +func ParseNumberVal(s string) (Value, error) { + // Base 10, precision 512, and rounding to nearest even is the standard + // way to handle numbers arriving as strings. + f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven) + if err != nil { + return NilVal, fmt.Errorf("a number is required") + } + return NumberVal(f), nil +} + +// MustParseNumberVal is like ParseNumberVal but it will panic in case of any +// error. It can be used during initialization or any other situation where +// the given string is a constant or otherwise known to be correct by the +// caller. +func MustParseNumberVal(s string) Value { + ret, err := ParseNumberVal(s) + if err != nil { + panic(err) + } + return ret +} + +// NumberIntVal returns a Value of type Number whose internal value is equal +// to the given integer. +func NumberIntVal(v int64) Value { + return NumberVal(new(big.Float).SetInt64(v)) +} + +// NumberUIntVal returns a Value of type Number whose internal value is equal +// to the given unsigned integer. 
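A small sketch, not vendored code, of the precision behaviour described above; the expected results follow directly from the doc comments in this file:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	a := cty.MustParseNumberVal("0.1")
	b := cty.MustParseNumberVal("0.1")

	// Identical strings always parse to equal numbers.
	fmt.Println(a.Equals(b).True()) // true

	// Going through float64 first rounds the mantissa to fewer bits,
	// so the result is not equal to the directly-parsed number.
	c := cty.NumberFloatVal(0.1)
	fmt.Println(a.Equals(c).True()) // false
}
```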
+func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: NormalizeString(v), + } +} + +// NormalizeString applies the same normalization that cty applies when +// constructing string values. +// +// A return value from this function can be meaningfully compared byte-for-byte +// with a Value.AsString result. +func NormalizeString(s string) string { + return norm.NFC.String(s) +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. +func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) 
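A brief sketch, not vendored code, of the homogeneity rule for the collection constructors and the corresponding *Empty variants:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	l := cty.ListVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})
	fmt.Println(l.Type().GoString()) // cty.List(cty.Number)

	// Empty collections cannot infer an element type, so the *Empty
	// constructors take the element type explicitly.
	fmt.Println(cty.ListValEmpty(cty.String).LengthInt())     // 0
	fmt.Println(cty.MapValEmpty(cty.Bool).Type().GoString())  // cty.Map(cty.Bool)

	m := cty.MapVal(map[string]cty.Value{
		"a": cty.StringVal("x"),
		"b": cty.StringVal("y"),
	})
	fmt.Println(m.LengthInt()) // 2
}
```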
+func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) +func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + var markSets []ValueMarks + + for i, val := range vals { + if unmarkedVal, marks := val.UnmarkDeep(); len(marks) > 0 { + val = unmarkedVal + markSets = append(markSets, marks) + } + if val.ContainsMarked() { + // FIXME: Allow this, but unmark the values and apply the + // marking to the set itself instead. + panic("set cannot contain marked values") + } + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + }.WithMarks(markSets...) +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. 
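+//
+// A minimal usage sketch (assuming the package's Capsule type constructor,
+// which pairs a name with a reflect.Type describing the native Go type):
+//
+//    type handle struct{ id int }
+//    handleTy := Capsule("handle", reflect.TypeOf(handle{}))
+//    v := CapsuleVal(handleTy, &handle{id: 1}) // must be a pointer
+//    h := v.EncapsulatedValue().(*handle)      // same pointer back out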
+func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_ops.go b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go new file mode 100644 index 00000000..69e5a8ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go @@ -0,0 +1,1290 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty/set" +) + +// GoString is an implementation of fmt.GoStringer that produces concise +// source-like representations of values suitable for use in debug messages. +func (val Value) GoString() string { + if val.IsMarked() { + unVal, marks := val.Unmark() + if len(marks) == 1 { + var mark interface{} + for m := range marks { + mark = m + } + return fmt.Sprintf("%#v.Mark(%#v)", unVal, mark) + } + return fmt.Sprintf("%#v.WithMarks(%#v)", unVal, marks) + } + + if val == NilVal { + return "cty.NilVal" + } + + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + if val == DynamicVal { // is unknown, so must be before the IsKnown check below + return "cty.DynamicVal" + } + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } + return "cty.False" + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. 
+ if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1)) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.SetVal(%#v)", vals) + case val.ty.IsListType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.ListVal(%#v)", vals) + case val.ty.IsMapType(): + vals := val.AsValueMap() + if len(vals) == 0 { + return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.MapVal(%#v)", vals) + case val.ty.IsTupleType(): + if val.ty.Equals(EmptyTuple) { + return "cty.EmptyTupleVal" + } + vals := val.AsValueSlice() + return fmt.Sprintf("cty.TupleVal(%#v)", vals) + case val.ty.IsObjectType(): + if val.ty.Equals(EmptyObject) { + return "cty.EmptyObjectVal" + } + vals := val.AsValueMap() + return fmt.Sprintf("cty.ObjectVal(%#v)", vals) + case val.ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().GoString + if impl == nil { + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + return impl(val.EncapsulatedValue()) + } + + // Default exposes implementation details, so should actually cover + // all of the cases above for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// As a special case, two null values are always equal regardless of type. +// +// The usual short-circuit rules apply, so the result will be unknown if +// either of the given values are. +// +// Use RawEquals to compare if two values are equal *ignoring* the +// short-circuit rules and the exception for null values. +func (val Value) Equals(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Equals(other).WithMarks(valMarks, otherMarks) + } + + // Start by handling Unknown values before considering types. + // This needs to be done since Null values are always equal regardless of + // type. + switch { + case !val.IsKnown() && !other.IsKnown(): + // both unknown + return UnknownVal(Bool) + case val.IsKnown() && !other.IsKnown(): + switch { + case val.IsNull(), other.ty.HasDynamicTypes(): + // If known is Null, we need to wait for the unkown value since + // nulls of any type are equal. + // An unkown with a dynamic type compares as unknown, which we need + // to check before the type comparison below. + return UnknownVal(Bool) + case !val.ty.Equals(other.ty): + // There is no null comparison or dynamic types, so unequal types + // will never be equal. + return False + default: + return UnknownVal(Bool) + } + case other.IsKnown() && !val.IsKnown(): + switch { + case other.IsNull(), val.ty.HasDynamicTypes(): + // If known is Null, we need to wait for the unkown value since + // nulls of any type are equal. + // An unkown with a dynamic type compares as unknown, which we need + // to check before the type comparison below. 
+ return UnknownVal(Bool) + case !other.ty.Equals(val.ty): + // There's no null comparison or dynamic types, so unequal types + // will never be equal. + return False + default: + return UnknownVal(Bool) + } + } + + switch { + case val.IsNull() && other.IsNull(): + // Nulls are always equal, regardless of type + return BoolVal(true) + case val.IsNull() || other.IsNull(): + // If only one is null then the result must be false + return BoolVal(false) + } + + if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() { + return UnknownVal(Bool) + } + + if !val.ty.Equals(other.ty) { + return BoolVal(false) + } + + ty := val.ty + result := false + + switch { + case ty == Number: + result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + case ty == Bool: + result = val.v.(bool) == other.v.(bool) + case ty == String: + // Simple equality is safe because we NFC-normalize strings as they + // enter our world from StringVal, and so we can assume strings are + // always in normal form. + result = val.v.(string) == other.v.(string) + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + result = true + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + result = true + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + result = true + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + equal := true + + // Note that by our definition of sets it's never possible for two + // sets that contain unknown values (directly or indicrectly) to + // ever be equal, even if they are otherwise identical. + + // FIXME: iterating both lists and checking each item is not the + // ideal implementation here, but it works with the primitives we + // have in the set implementation. Perhaps the set implementation + // can provide its own equality test later. 
+ s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().Equals + if impl == nil { + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return BoolVal(val.v == other.v) + } + return BoolVal(impl(val.v, other.v)) + } + ret := impl(val.v, other.v) + if !ret.Type().Equals(Bool) { + panic(fmt.Sprintf("Equals for %#v returned %#v, not cty.Bool", ty, ret.Type())) + } + return ret + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + val.assertUnmarked() + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. +func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if !val.HasSameMarks(other) { + return false + } + // Since we've now checked the marks, we'll unmark for the rest of this... 
+ val = val.unmarkForce() + other = other.unmarkForce() + + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) + return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + } + return impl(val.v, other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. 
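+//
+// A brief sketch of the behavior described above:
+//
+//    NumberIntVal(2).Add(NumberIntVal(3))    // equals NumberIntVal(5)
+//    NumberIntVal(2).Add(UnknownVal(Number)) // UnknownVal(Number), short-circuit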
+func (val Value) Add(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Add(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Subtract(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Negate().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Multiply(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Divide(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. 
+// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Modulo(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. + if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. +func (val Value) Absolute() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Absolute().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := (&big.Float{}).Abs(val.v.(*big.Float)) + return NumberVal(ret) +} + +// GetAttr returns the value of the given attribute of the receiver, which +// must be of an object type that has an attribute of the given name. +// This method will panic if the receiver type is not compatible. +// +// The method will also panic if the given attribute name is not defined +// for the value's type. Use the attribute-related methods on Type to +// check for the validity of an attribute before trying to use it. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. 
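+//
+// A minimal usage sketch, assuming the object constructor from this package:
+//
+//    obj := ObjectVal(map[string]Value{
+//        "name": StringVal("example"),
+//    })
+//    name := obj.GetAttr("name") // StringVal("example")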
+func (val Value) GetAttr(name string) Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.GetAttr(name).WithMarks(valMarks) + } + + if val.ty == DynamicPseudoType { + return DynamicVal + } + + if !val.ty.IsObjectType() { + panic("value is not an object") + } + + name = NormalizeString(name) + if !val.ty.HasAttribute(name) { + panic("value has no attribute of that name") + } + + attrType := val.ty.AttributeType(name) + + if !val.IsKnown() { + return UnknownVal(attrType) + } + + return Value{ + ty: attrType, + v: val.v.(map[string]interface{})[name], + } +} + +// Index returns the value of an element of the receiver, which must have +// either a list, map or tuple type. This method will panic if the receiver +// type is not compatible. +// +// The key value must be the correct type for the receving collection: a +// number if the collection is a list or tuple, or a string if it is a map. +// In the case of a list or tuple, the given number must be convertable to int +// or this method will panic. The key may alternatively be of +// DynamicPseudoType, in which case the result itself is an unknown of the +// collection's element type. +// +// The result is of the receiver collection's element type, or in the case +// of a tuple the type of the specific element index requested. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be the DynamicValue. +func (val Value) Index(key Value) Value { + if val.IsMarked() || key.IsMarked() { + val, valMarks := val.Unmark() + key, keyMarks := key.Unmark() + return val.Index(key).WithMarks(valMarks, keyMarks) + } + + if val.ty == DynamicPseudoType { + return DynamicVal + } + + switch { + case val.Type().IsListType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != Number { + panic("element key for list must be number") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + return Value{ + ty: elty, + v: val.v.([]interface{})[index], + } + case val.Type().IsMapType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != String { + panic("element key for map must be string") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + keyStr := key.v.(string) + + return Value{ + ty: elty, + v: val.v.(map[string]interface{})[keyStr], + } + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return DynamicVal + } + + if key.Type() != Number { + panic("element key for tuple must be number") + } + if !key.IsKnown() { + return DynamicVal + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + eltys := val.Type().TupleElementTypes() + + if !val.IsKnown() { + return UnknownVal(eltys[index]) + } + + return Value{ + ty: eltys[index], + v: val.v.([]interface{})[index], + } + default: + panic("not a list, map, or tuple type") + } +} + +// HasIndex returns True if the receiver (which must be supported for Index) +// has an element with the given index key, or False if it does not. 
+// +// The result will be UnknownVal(Bool) if either the collection or the +// key value are unknown. +// +// This method will panic if the receiver is not indexable, but does not +// impose any panic-causing type constraints on the key. +func (val Value) HasIndex(key Value) Value { + if val.IsMarked() || key.IsMarked() { + val, valMarks := val.Unmark() + key, keyMarks := key.Unmark() + return val.HasIndex(key).WithMarks(valMarks, keyMarks) + } + + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + if val.IsMarked() || elem.IsMarked() { + val, valMarks := val.Unmark() + elem, elemMarks := elem.Unmark() + return val.HasElement(elem).WithMarks(valMarks, elemMarks) + } + + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if !ty.ElementType().Equals(elem.Type()) { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Length().WithMarks(valMarks) + } + + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. 
+ return NumberIntVal(int64(val.Type().Length())) + } + + if !val.IsKnown() { + return UnknownVal(Number) + } + + return NumberIntVal(int64(val.LengthInt())) +} + +// LengthInt is like Length except it returns an int. It has the same behavior +// as Length except that it will panic if the receiver is unknown. +// +// This is an integration method provided for the convenience of code bridging +// into Go's type system. +func (val Value) LengthInt() int { + val.assertUnmarked() + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return val.Type().Length() + } + if val.Type().IsObjectType() { + // For objects, the length is the number of attributes associated with the type. + return len(val.Type().AttributeTypes()) + } + if !val.IsKnown() { + panic("value is not known") + } + if val.IsNull() { + panic("value is null") + } + + switch { + + case val.ty.IsListType(): + return len(val.v.([]interface{})) + + case val.ty.IsSetType(): + return val.v.(set.Set).Length() + + case val.ty.IsMapType(): + return len(val.v.(map[string]interface{})) + + default: + panic("value is not a collection") + } +} + +// ElementIterator returns an ElementIterator for iterating the elements +// of the receiver, which must be a collection type, a tuple type, or an object +// type. If called on a method of any other type, this method will panic. +// +// The value must be Known and non-Null, or this method will panic. +// +// If the receiver is of a list type, the returned keys will be of type Number +// and the values will be of the list's element type. +// +// If the receiver is of a map type, the returned keys will be of type String +// and the value will be of the map's element type. Elements are passed in +// ascending lexicographical order by key. +// +// If the receiver is of a set type, each element is returned as both the +// key and the value, since set members are their own identity. +// +// If the receiver is of a tuple type, the returned keys will be of type Number +// and the value will be of the corresponding element's type. +// +// If the receiver is of an object type, the returned keys will be of type +// String and the value will be of the corresponding attributes's type. +// +// ElementIterator is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. +func (val Value) ElementIterator() ElementIterator { + val.assertUnmarked() + if !val.IsKnown() { + panic("can't use ElementIterator on unknown value") + } + if val.IsNull() { + panic("can't use ElementIterator on null value") + } + return elementIterator(val) +} + +// CanIterateElements returns true if the receiver can support the +// ElementIterator method (and by extension, ForEachElement) without panic. +func (val Value) CanIterateElements() bool { + return canElementIterator(val) +} + +// ForEachElement executes a given callback function for each element of +// the receiver, which must be a collection type or tuple type, or this method +// will panic. +// +// ForEachElement uses ElementIterator internally, and so the values passed +// to the callback are as described for ElementIterator. +// +// Returns true if the iteration exited early due to the callback function +// returning true, or false if the loop ran to completion. +// +// ForEachElement is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. 
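+//
+// A minimal usage sketch (assuming ElementCallback has the shape used in the
+// body below: a function of (key, value Value) returning true to stop early):
+//
+//    l := ListVal([]Value{StringVal("a"), StringVal("b")})
+//    l.ForEachElement(func(key, val Value) bool {
+//        fmt.Printf("%s: %s\n", key.GoString(), val.AsString())
+//        return false // keep iterating
+//    })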
+func (val Value) ForEachElement(cb ElementCallback) bool { + val.assertUnmarked() + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + stop := cb(key, val) + if stop { + return true + } + } + return false +} + +// Not returns the logical inverse of the receiver, which must be of type +// Bool or this method will panic. +func (val Value) Not() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Not().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(!val.v.(bool)) +} + +// And returns the result of logical AND with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) And(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.And(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Or(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.LessThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.GreaterThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. 
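+//
+// For example, per the comparison semantics above:
+//
+//    NumberIntVal(3).GreaterThanOrEqualTo(NumberIntVal(3)) // True
+//    NumberIntVal(2).LessThan(UnknownVal(Number))          // UnknownVal(Bool)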
+func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + val.assertUnmarked() + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + val.assertUnmarked() + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. +// +// For more convenient conversions to maps of more specific types, use +// the "gocty" package. +func (val Value) AsValueMap() map[string]Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make(map[string]Value, l) + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + ret[k.AsString()] = v + } + return ret +} + +// AsValueSet returns a ValueSet representation of a non-null, +// non-unknown value of any collection type, or panics if called +// on any other value. +// +// Unlike AsValueSlice and AsValueMap, this method requires specifically a +// collection type (list, set or map) and does not allow structural types +// (tuple or object), because the ValueSet type requires homogenous +// element types. +// +// The returned ValueSet can store only values of the receiver's element type. +func (val Value) AsValueSet() ValueSet { + val.assertUnmarked() + if !val.Type().IsCollectionType() { + panic("not a collection type") + } + + // We don't give the caller our own set.Set (assuming we're a cty.Set value) + // because then the caller could mutate our internals, which is forbidden. + // Instead, we will construct a new set and append our elements into it. + ret := NewValueSet(val.Type().ElementType()) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret.Add(v) + } + return ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. 
Since cty considers values to be immutable, it is strongly +// recommended to treat the encapsulated value itself as immutable too. +func (val Value) EncapsulatedValue() interface{} { + val.assertUnmarked() + if !val.Type().IsCapsuleType() { + panic("not a capsule-typed value") + } + + return val.v +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/walk.go b/vendor/github.com/hashicorp/go-cty/cty/walk.go new file mode 100644 index 00000000..a6943bab --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/walk.go @@ -0,0 +1,182 @@ +package cty + +// Walk visits all of the values in a possibly-complex structure, calling +// a given function for each value. +// +// For example, given a list of strings the callback would first be called +// with the whole list and then called once for each element of the list. +// +// The callback function may prevent recursive visits to child values by +// returning false. The callback function my halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context. +// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. +func Walk(val Value, cb func(Path, Value) (bool, error)) error { + var path Path + return walk(path, val, cb) +} + +func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error { + deeper, err := cb(path, val) + if err != nil { + return err + } + if !deeper { + return nil + } + + if val.IsNull() || !val.IsKnown() { + // Can't recurse into null or unknown values, regardless of type + return nil + } + + ty := val.Type() + switch { + case ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + nameVal, av := it.Element() + path := append(path, GetAttrStep{ + Name: nameVal.AsString(), + }) + err := walk(path, av, cb) + if err != nil { + return err + } + } + case val.CanIterateElements(): + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + err := walk(path, ev, cb) + if err != nil { + return err + } + } + } + return nil +} + +// Transform visits all of the values in a possibly-complex structure, +// calling a given function for each value which has an opportunity to +// replace that value. +// +// Unlike Walk, Transform visits child nodes first, so for a list of strings +// it would first visit the strings and then the _new_ list constructed +// from the transformed values of the list items. +// +// This is useful for creating the effect of being able to make deep mutations +// to a value even though values are immutable. However, it's the responsibility +// of the given function to preserve expected invariants, such as homogenity of +// element types in collections; this function can panic if such invariants +// are violated, just as if new values were constructed directly using the +// value constructor functions. An easy way to preserve invariants is to +// ensure that the transform function never changes the value type. +// +// The callback function my halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context. 
+// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. +func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md index bbcd15de..e5395894 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -1,12 +1,10 @@ # go-getter [![CircleCI](https://circleci.com/gh/hashicorp/go-getter/tree/master.svg?style=svg)][circleci] -[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor] [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] [circleci]: https://circleci.com/gh/hashicorp/go-getter/tree/master [godocs]: http://godoc.org/github.com/hashicorp/go-getter -[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master go-getter is a library for Go (golang) for downloading files or directories from various sources using a URL as the primary form of input. @@ -82,6 +80,8 @@ is built-in by default: file URLs. * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically changed to Git protocol over HTTP. + * GitLab URLs, such as "gitlab.com/inkscape/inkscape" are automatically + changed to Git protocol over HTTP. 
* BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically changed to a Git or mercurial protocol using the BitBucket API. @@ -139,8 +139,8 @@ If you downloaded this to the `/tmp` directory, then the file directory in this repository, but because we specified a subdirectory, go-getter automatically copied only that directory contents. -Subdirectory paths may contain may also use filesystem glob patterns. -The path must match _exactly one_ entry or go-getter will return an error. +Subdirectory paths may also use filesystem glob patterns. The path must +match _exactly one_ entry or go-getter will return an error. This is useful if you're not sure the exact directory name but it follows a predictable naming structure. @@ -318,6 +318,7 @@ are also supported. If the query parameters are present, these take priority. * `aws_access_key_id` - AWS access key. * `aws_access_key_secret` - AWS access key secret. * `aws_access_token` - AWS access token if this is being used. + * `aws_profile` - Use this profile from local ~/.aws/ config. Takes priority over the other three. #### Using IAM Instance Profiles with S3 diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml deleted file mode 100644 index 1e8718e1..00000000 --- a/vendor/github.com/hashicorp/go-getter/appveyor.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: "build-{branch}-{build}" -image: Visual Studio 2017 -clone_folder: c:\gopath\github.com\hashicorp\go-getter -environment: - GOPATH: c:\gopath -install: -- cmd: >- - echo %Path% - - go version - - go env - - go get -d -v -t ./... -build_script: -- cmd: go test ./... diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go index 38fb43b8..78a96bf0 100644 --- a/vendor/github.com/hashicorp/go-getter/client.go +++ b/vendor/github.com/hashicorp/go-getter/client.go @@ -39,6 +39,10 @@ type Client struct { // for documentation. Mode ClientMode + // Umask is used to mask file permissions when storing local files or decompressing + // an archive + Umask os.FileMode + // Detectors is the list of detectors that are tried on the source. // If this is nil, then the default Detectors will be used. Detectors []Detector @@ -66,6 +70,20 @@ type Client struct { Options []ClientOption } +// umask returns the effective umask for the Client, defaulting to the process umask +func (c *Client) umask() os.FileMode { + if c == nil { + return 0 + } + return c.Umask +} + +// mode returns file mode umasked by the Client umask +func (c *Client) mode(mode os.FileMode) os.FileMode { + m := mode & ^c.umask() + return m +} + // Get downloads the configured source to the destination. func (c *Client) Get() error { if err := c.Configure(c.Options...); err != nil { @@ -233,7 +251,7 @@ func (c *Client) Get() error { if decompressor != nil { // We have a decompressor, so decompress the current destination // into the final destination with the proper mode. 
- err := decompressor.Decompress(decompressDst, dst, decompressDir) + err := decompressor.Decompress(decompressDst, dst, decompressDir, c.umask()) if err != nil { return err } @@ -281,7 +299,7 @@ func (c *Client) Get() error { if err := os.RemoveAll(realDst); err != nil { return err } - if err := os.MkdirAll(realDst, 0755); err != nil { + if err := os.MkdirAll(realDst, c.mode(0755)); err != nil { return err } @@ -291,7 +309,7 @@ func (c *Client) Get() error { return err } - return copyDir(c.Ctx, realDst, subDir, false) + return copyDir(c.Ctx, realDst, subDir, false, c.umask()) } return nil diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go index 641fe6d0..a629306b 100644 --- a/vendor/github.com/hashicorp/go-getter/copy_dir.go +++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -7,11 +7,16 @@ import ( "strings" ) +// mode returns the file mode masked by the umask +func mode(mode, umask os.FileMode) os.FileMode { + return mode & ^umask +} + // copyDir copies the src directory contents into dst. Both directories // should already exist. // // If ignoreDot is set to true, then dot-prefixed files/folders are ignored. -func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error { +func copyDir(ctx context.Context, dst string, src string, ignoreDot bool, umask os.FileMode) error { src, err := filepath.EvalSymlinks(src) if err != nil { return err @@ -46,7 +51,7 @@ func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error return nil } - if err := os.MkdirAll(dstPath, 0755); err != nil { + if err := os.MkdirAll(dstPath, mode(0755, umask)); err != nil { return err } @@ -54,24 +59,8 @@ func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error } // If we have a file, copy the contents. - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := Copy(ctx, dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) + _, err = copyFile(ctx, dstPath, path, info.Mode(), umask) + return err } return filepath.Walk(src, walkFn) diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go index 198bb0ed..62ca1060 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress.go +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -1,6 +1,7 @@ package getter import ( + "os" "strings" ) @@ -14,7 +15,7 @@ type Decompressor interface { // Decompress should decompress src to dst. dir specifies whether dst // is a directory or single file. src is guaranteed to be a single file // that exists. dst is not guaranteed to exist already. - Decompress(dst, src string, dir bool) error + Decompress(dst, src string, dir bool, umask os.FileMode) error } // Decompressors is the mapping of extension to the Decompressor implementation diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go index 339f4cf7..a5373e4e 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go @@ -3,7 +3,6 @@ package getter import ( "compress/bzip2" "fmt" - "io" "os" "path/filepath" ) @@ -12,14 +11,14 @@ import ( // decompress bz2 files. 
type Bzip2Decompressor struct{} -func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error { +func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // Directory isn't supported at all if dir { return fmt.Errorf("bzip2-compressed files can only unarchive to a single file") } // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { return err } @@ -34,12 +33,5 @@ func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error { bzipR := bzip2.NewReader(f) // Copy it out - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, bzipR) - return err + return copyReader(dst, bzipR, 0622, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go index 5ebf709b..669e5eaf 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -3,7 +3,6 @@ package getter import ( "compress/gzip" "fmt" - "io" "os" "path/filepath" ) @@ -12,14 +11,14 @@ import ( // decompress gzip files. type GzipDecompressor struct{} -func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { +func (d *GzipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // Directory isn't supported at all if dir { return fmt.Errorf("gzip-compressed files can only unarchive to a single file") } // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { return err } @@ -38,12 +37,5 @@ func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { defer gzipR.Close() // Copy it out - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, gzipR) - return err + return copyReader(dst, gzipR, 0622, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go index b6986a25..4b58b036 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tar.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -11,7 +11,7 @@ import ( // untar is a shared helper for untarring an archive. The reader should provide // an uncompressed view of the tar archive. -func untar(input io.Reader, dst, src string, dir bool) error { +func untar(input io.Reader, dst, src string, dir bool, umask os.FileMode) error { tarR := tar.NewReader(input) done := false dirHdrs := []*tar.Header{} @@ -51,7 +51,7 @@ func untar(input io.Reader, dst, src string, dir bool) error { } // A directory, just make the directory and continue unarchiving... 
- if err := os.MkdirAll(path, 0755); err != nil { + if err := os.MkdirAll(path, mode(0755, umask)); err != nil { return err } @@ -67,7 +67,7 @@ func untar(input io.Reader, dst, src string, dir bool) error { // Check that the directory exists, otherwise create it if _, err := os.Stat(dstPath); os.IsNotExist(err) { - if err := os.MkdirAll(dstPath, 0755); err != nil { + if err := os.MkdirAll(dstPath, mode(0755, umask)); err != nil { return err } } @@ -82,20 +82,10 @@ func untar(input io.Reader, dst, src string, dir bool) error { done = true // Open the file for writing - dstF, err := os.Create(path) + err = copyReader(path, tarR, hdr.FileInfo().Mode(), umask) if err != nil { return err } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } // Set the access and modification time if valid, otherwise default to current time aTime := now @@ -115,7 +105,7 @@ func untar(input io.Reader, dst, src string, dir bool) error { for _, dirHdr := range dirHdrs { path := filepath.Join(dst, dirHdr.Name) // Chmod the directory since they might be created before we know the mode flags - if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil { + if err := os.Chmod(path, mode(dirHdr.FileInfo().Mode(), umask)); err != nil { return err } // Set the mtime/atime attributes since they would have been changed during extraction @@ -139,13 +129,13 @@ func untar(input io.Reader, dst, src string, dir bool) error { // unpack tar files. type tarDecompressor struct{} -func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { +func (d *tarDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // If we're going into a directory we should make that first mkdir := dst if !dir { mkdir = filepath.Dir(dst) } - if err := os.MkdirAll(mkdir, 0755); err != nil { + if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { return err } @@ -156,5 +146,5 @@ func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { } defer f.Close() - return untar(f, dst, src, dir) + return untar(f, dst, src, dir, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go index 5391b5c8..e2e5458c 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -10,13 +10,13 @@ import ( // decompress tar.bz2 files. 
type TarBzip2Decompressor struct{} -func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { +func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // If we're going into a directory we should make that first mkdir := dst if !dir { mkdir = filepath.Dir(dst) } - if err := os.MkdirAll(mkdir, 0755); err != nil { + if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { return err } @@ -29,5 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { // Bzip2 compression is second bzipR := bzip2.NewReader(f) - return untar(bzipR, dst, src, dir) + return untar(bzipR, dst, src, dir, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go index b2f662a8..b18bd6cb 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -47,7 +47,7 @@ func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { defer os.RemoveAll(td) // Decompress - err := d.Decompress(dst, tc.Input, tc.Dir) + err := d.Decompress(dst, tc.Input, tc.Dir, 0022) if (err != nil) != tc.Err { t.Fatalf("err %s: %s", tc.Input, err) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go index 65eb70dd..84c4aa33 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -11,13 +11,13 @@ import ( // decompress tar.gzip files. type TarGzipDecompressor struct{} -func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { +func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // If we're going into a directory we should make that first mkdir := dst if !dir { mkdir = filepath.Dir(dst) } - if err := os.MkdirAll(mkdir, 0755); err != nil { + if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { return err } @@ -35,5 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { } defer gzipR.Close() - return untar(gzipR, dst, src, dir) + return untar(gzipR, dst, src, dir, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go index 5e151c12..24686f45 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_txz.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go @@ -12,13 +12,13 @@ import ( // decompress tar.xz files. 
type TarXzDecompressor struct{} -func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { +func (d *TarXzDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // If we're going into a directory we should make that first mkdir := dst if !dir { mkdir = filepath.Dir(dst) } - if err := os.MkdirAll(mkdir, 0755); err != nil { + if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { return err } @@ -35,5 +35,5 @@ func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) } - return untar(txzR, dst, src, dir) + return untar(txzR, dst, src, dir, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go index 4e37abab..de5d6ce2 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_xz.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go @@ -2,7 +2,6 @@ package getter import ( "fmt" - "io" "os" "path/filepath" @@ -13,14 +12,14 @@ import ( // decompress xz files. type XzDecompressor struct{} -func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { +func (d *XzDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // Directory isn't supported at all if dir { return fmt.Errorf("xz-compressed files can only unarchive to a single file") } // If we're going into a directory we should make that first - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), mode(0755, umask)); err != nil { return err } @@ -38,12 +37,5 @@ func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { } // Copy it out - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = io.Copy(dstF, xzR) - return err + return copyReader(dst, xzR, 0622, umask) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go index 0830f791..4876a337 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go @@ -3,7 +3,6 @@ package getter import ( "archive/zip" "fmt" - "io" "os" "path/filepath" ) @@ -12,13 +11,13 @@ import ( // decompress zip files. type ZipDecompressor struct{} -func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { +func (d *ZipDecompressor) Decompress(dst, src string, dir bool, umask os.FileMode) error { // If we're going into a directory we should make that first mkdir := dst if !dir { mkdir = filepath.Dir(dst) } - if err := os.MkdirAll(mkdir, 0755); err != nil { + if err := os.MkdirAll(mkdir, mode(0755, umask)); err != nil { return err } @@ -56,7 +55,7 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { } // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { + if err := os.MkdirAll(path, mode(0755, umask)); err != nil { return err } @@ -67,34 +66,23 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { // required to contain entries for just the directories so this // can happen. 
if dir { - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(path), mode(0755, umask)); err != nil { return err } } // Open the file for reading srcF, err := f.Open() - if err != nil { - return err - } - - // Open the file for writing - dstF, err := os.Create(path) if err != nil { srcF.Close() return err } - _, err = io.Copy(dstF, srcF) + + err = copyReader(path, srcF, f.Mode(), umask) srcF.Close() - dstF.Close() if err != nil { return err } - - // Chmod the file - if err := os.Chmod(path, f.Mode()); err != nil { - return err - } } return nil diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go index 5bb750c9..f134f770 100644 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -23,6 +23,7 @@ var Detectors []Detector func init() { Detectors = []Detector{ new(GitHubDetector), + new(GitLabDetector), new(GitDetector), new(BitBucketDetector), new(S3Detector), diff --git a/vendor/github.com/hashicorp/go-getter/detect_gitlab.go b/vendor/github.com/hashicorp/go-getter/detect_gitlab.go new file mode 100644 index 00000000..9d1e8d83 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_gitlab.go @@ -0,0 +1,47 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GitLabDetector implements Detector to detect GitLab URLs and turn +// them into URLs that the Git Getter can understand. +type GitLabDetector struct{} + +func (d *GitLabDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "gitlab.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *GitLabDetector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 3 { + return "", false, fmt.Errorf( + "GitLab URLs should be gitlab.com/username/repo") + } + + urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) + repoUrl, err := url.Parse(urlStr) + if err != nil { + return "", true, fmt.Errorf("error parsing GitLab URL: %s", err) + } + + if !strings.HasSuffix(repoUrl.Path, ".git") { + repoUrl.Path += ".git" + } + + if len(parts) > 3 { + repoUrl.Path += "//" + strings.Join(parts[3:], "/") + } + + return "git::" + repoUrl.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/get_file_copy.go index d70fb495..29abbd1a 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_copy.go +++ b/vendor/github.com/hashicorp/go-getter/get_file_copy.go @@ -3,6 +3,7 @@ package getter import ( "context" "io" + "os" ) // readerFunc is syntactic sugar for read interface. @@ -27,3 +28,48 @@ func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) { } })) } + +// copyReader copies from an io.Reader into a file, using umask to create the dst file +func copyReader(dst string, src io.Reader, fmode, umask os.FileMode) error { + dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fmode) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, src) + if err != nil { + return err + } + + // Explicitly chmod; the process umask is unconditionally applied otherwise. 
+ // We'll mask the mode with our own umask, but that may be different than + // the process umask + return os.Chmod(dst, mode(fmode, umask)) +} + +// copyFile copies a file in chunks from src path to dst path, using umask to create the dst file +func copyFile(ctx context.Context, dst, src string, fmode, umask os.FileMode) (int64, error) { + srcF, err := os.Open(src) + if err != nil { + return 0, err + } + defer srcF.Close() + + dstF, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fmode) + if err != nil { + return 0, err + } + defer dstF.Close() + + count, err := Copy(ctx, dstF, srcF) + if err != nil { + return 0, err + } + + // Explicitly chmod; the process umask is unconditionally applied otherwise. + // We'll mask the mode with our own umask, but that may be different than + // the process umask + err = os.Chmod(dst, mode(fmode, umask)) + return count, err +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go index c3b28ae5..40ebc5af 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go +++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go @@ -41,7 +41,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } @@ -56,13 +56,15 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } // The source path must exist and be a file to be usable. - if fi, err := os.Stat(path); err != nil { + var fi os.FileInfo + var err error + if fi, err = os.Stat(path); err != nil { return fmt.Errorf("source path error: %s", err) } else if fi.IsDir() { return fmt.Errorf("source path must be a file") } - _, err := os.Lstat(dst) + _, err = os.Lstat(dst) if err != nil && !os.IsNotExist(err) { return err } @@ -76,7 +78,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err = os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } @@ -86,18 +88,6 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } // Copy - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = Copy(ctx, dstF, srcF) + _, err = copyFile(ctx, dst, path, fi.Mode(), g.client.umask()) return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go index 24f1acb1..909d5b00 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go +++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go @@ -45,7 +45,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } @@ -88,7 +88,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } @@ -112,19 +112,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } // Copy - srcF, err := os.Open(path) - if err != nil { - return err - 
} - defer srcF.Close() - - dstF, err := os.Create(dst) - if err != nil { - return err - } - defer dstF.Close() - - _, err = Copy(ctx, dstF, srcF) + _, err = copyFile(ctx, dst, path, 0666, g.client.umask()) return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_gcs.go b/vendor/github.com/hashicorp/go-getter/get_gcs.go index 6faa70f4..4b895877 100644 --- a/vendor/github.com/hashicorp/go-getter/get_gcs.go +++ b/vendor/github.com/hashicorp/go-getter/get_gcs.go @@ -77,7 +77,7 @@ func (g *GCSGetter) Get(dst string, u *url.URL) error { } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } @@ -138,18 +138,11 @@ func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, defer rc.Close() // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } - f, err := os.Create(dst) - if err != nil { - return err - } - defer f.Close() - - _, err = Copy(ctx, f, rc) - return err + return copyReader(dst, rc, 0666, g.client.umask()) } func (g *GCSGetter) parseURL(u *url.URL) (bucket, path string, err error) { diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 9ffdba78..b0229d76 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -88,7 +88,7 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } if g.Header != nil { - req.Header = g.Header + req.Header = g.Header.Clone() } resp, err := g.Client.Do(req) @@ -145,11 +145,11 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { } } // Create all the parent directories if needed - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } - f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) + f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, g.client.mode(0666)) if err != nil { return err } @@ -169,7 +169,7 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { return err } if g.Header != nil { - req.Header = g.Header + req.Header = g.Header.Clone() } headResp, err := g.Client.Do(req) if err == nil { @@ -259,11 +259,11 @@ func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) } // Make the final destination - if err := os.MkdirAll(dst, 0755); err != nil { + if err := os.MkdirAll(dst, g.client.mode(0755)); err != nil { return err } - return copyDir(ctx, dst, sourcePath, false) + return copyDir(ctx, dst, sourcePath, false, g.client.umask()) } // parseMeta looks for the first meta tag in the given reader that diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go index 93eeb0b8..5b2dd5e0 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -30,9 +30,10 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { } // Create client config - config := g.getAWSConfig(region, u, creds) - sess := session.New(config) - client := s3.New(sess) + client, err := g.newS3Client(region, u, creds) + if err != nil { + return 0, err + } // List the object(s) at the given prefix req := &s3.ListObjectsInput{ @@ -84,13 +85,14 @@ func (g *S3Getter) Get(dst string, u 
*url.URL) error { } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } - config := g.getAWSConfig(region, u, creds) - sess := session.New(config) - client := s3.New(sess) + client, err := g.newS3Client(region, u, creds) + if err != nil { + return err + } // List files in path, keep listing until no more objects are found lastMarker := "" @@ -144,9 +146,11 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error { return err } - config := g.getAWSConfig(region, u, creds) - sess := session.New(config) - client := s3.New(sess) + client, err := g.newS3Client(region, u, creds) + if err != nil { + return err + } + return g.getObject(ctx, client, dst, bucket, path, version) } @@ -165,36 +169,24 @@ func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, ke } // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { return err } - f, err := os.Create(dst) - if err != nil { - return err - } - defer f.Close() - - _, err = Copy(ctx, f, resp.Body) - return err + return copyReader(dst, resp.Body, 0666, g.client.umask()) } func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { conf := &aws.Config{} - if creds == nil { - // Grab the metadata URL - metadataURL := os.Getenv("AWS_METADATA_URL") - if metadataURL == "" { - metadataURL = "http://169.254.169.254:80/latest" - } - + metadataURLOverride := os.Getenv("AWS_METADATA_URL") + if creds == nil && metadataURLOverride != "" { creds = credentials.NewChainCredentials( []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, &ec2rolecreds.EC2RoleProvider{ Client: ec2metadata.New(session.New(&aws.Config{ - Endpoint: aws.String(metadataURL), + Endpoint: aws.String(metadataURLOverride), })), }, }) @@ -213,7 +205,7 @@ func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials. 
conf.Region = aws.String(region) } - return conf + return conf.WithCredentialsChainVerboseErrors(true) } func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { @@ -273,3 +265,25 @@ func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, c return } + +func (g *S3Getter) newS3Client( + region string, url *url.URL, creds *credentials.Credentials, +) (*s3.S3, error) { + var sess *session.Session + + if profile := url.Query().Get("aws_profile"); profile != "" { + var err error + sess, err = session.NewSessionWithOptions(session.Options{ + Profile: profile, + SharedConfigState: session.SharedConfigEnable, + }) + if err != nil { + return nil, err + } + } else { + config := g.getAWSConfig(region, url, creds) + sess = session.New(config) + } + + return s3.New(sess), nil +} diff --git a/vendor/github.com/hashicorp/go-getter/go.mod b/vendor/github.com/hashicorp/go-getter/go.mod index a869e8f8..14af6b71 100644 --- a/vendor/github.com/hashicorp/go-getter/go.mod +++ b/vendor/github.com/hashicorp/go-getter/go.mod @@ -17,7 +17,9 @@ require ( github.com/mitchellh/go-testing-interface v1.0.0 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 // indirect - github.com/ulikunitz/xz v0.5.5 + github.com/ulikunitz/xz v0.5.8 google.golang.org/api v0.9.0 gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect ) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-getter/go.sum b/vendor/github.com/hashicorp/go-getter/go.sum index b88c747c..c16d1b4c 100644 --- a/vendor/github.com/hashicorp/go-getter/go.sum +++ b/vendor/github.com/hashicorp/go-getter/go.sum @@ -68,8 +68,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 1153e285..5d56f4b5 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -128,6 +128,21 @@ stdLogger.Printf("[DEBUG] %+v", stdLogger) ... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} ``` +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + Notice that if `appLogger` is initialized with the `INFO` log level _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. 
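The go-getter changes above thread a umask through every directory and file creation: the new `mode(perm, umask)` helper in `copy_dir.go`, the `copyReader`/`copyFile` helpers in `get_file_copy.go`, and the extra `umask os.FileMode` parameter on each `Decompressor.Decompress`. As a minimal standalone sketch of that masking behavior — illustrative only, not part of the vendored diff — the following shows how a requested permission is combined with a umask; the `mask` helper name and the sample modes are assumptions for the example.

```go
package main

import (
	"fmt"
	"os"
)

// mask mirrors the vendored helper: clear the umask bits from the
// requested permission (mode & ^umask).
func mask(perm, umask os.FileMode) os.FileMode {
	return perm & ^umask
}

func main() {
	// With the common 0022 umask (the value the vendored decompressor
	// tests pass in), directories requested as 0755 stay 0755, while
	// 0666 files come out as 0644.
	fmt.Printf("%o\n", mask(0755, 0022)) // 755
	fmt.Printf("%o\n", mask(0666, 0022)) // 644
}
```

This is why the hunks above replace bare `os.MkdirAll(dst, 0755)` calls with `os.MkdirAll(dst, mode(0755, umask))` and follow file writes with an explicit `os.Chmod`: the final on-disk permissions are the requested mode masked by the caller-supplied umask rather than the process umask.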
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 00000000..44aa9bf2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + fallthrough + case ForceColor: + return + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(fi.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + } + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 00000000..23486b6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + return + case ForceColor: + fi := l.checkWriterIsFile() + l.writer.w = colorable.NewColorable(fi) + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + return + } + l.writer.w = colorable.NewColorable(fi) + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 00000000..7815f501 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. 
+var contextKey = contextKeyType{} diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go new file mode 100644 index 00000000..cfd4307a --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/exclude.go @@ -0,0 +1,71 @@ +package hclog + +import ( + "regexp" + "strings" +) + +// ExcludeByMessage provides a simple way to build a list of log messages that +// can be queried and matched. This is meant to be used with the Exclude +// option on Options to suppress log messages. This does not hold any mutexs +// within itself, so normal usage would be to Add entries at setup and none after +// Exclude is going to be called. Exclude is called with a mutex held within +// the Logger, so that doesn't need to use a mutex. Example usage: +// +// f := new(ExcludeByMessage) +// f.Add("Noisy log message text") +// appLogger.Exclude = f.Exclude +type ExcludeByMessage struct { + messages map[string]struct{} +} + +// Add a message to be filtered. Do not call this after Exclude is to be called +// due to concurrency issues. +func (f *ExcludeByMessage) Add(msg string) { + if f.messages == nil { + f.messages = make(map[string]struct{}) + } + + f.messages[msg] = struct{}{} +} + +// Return true if the given message should be included +func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool { + _, ok := f.messages[msg] + return ok +} + +// ExcludeByPrefix is a simple type to match a message string that has a common prefix. +type ExcludeByPrefix string + +// Matches an message that starts with the prefix. +func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool { + return strings.HasPrefix(msg, string(p)) +} + +// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches +// the log entry is excluded. +type ExcludeByRegexp struct { + Regexp *regexp.Regexp +} + +// Exclude the log message if the message string matches the regexp +func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool { + return e.Regexp.MatchString(msg) +} + +// ExcludeFuncs is a slice of functions that will called to see if a log entry +// should be filtered or not. It stops calling functions once at least one returns +// true. +type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool + +// Calls each function until one of them returns true +func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool { + for _, f := range ff { + if f(level, msg, args...) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 55ce4396..22ebc57d 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -8,27 +8,55 @@ var ( protect sync.Once def Logger - // The options used to create the Default logger. These are - // read only when the Default logger is created, so set them - // as soon as the process starts. + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. DefaultOptions = &LoggerOptions{ Level: DefaultLevel, Output: DefaultOutput, } ) -// Return a logger that is held globally. This can be a good starting +// Default returns a globally held logger. 
This can be a good starting // place, and then you can use .With() and .Name() to create sub-loggers // to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// cause should be used if SetDefault() is called it random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. func Default() Logger { protect.Do(func() { - def = New(DefaultOptions) + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } }) return def } -// A short alias for Default() +// L is a short alias for Default(). func L() Logger { return Default() } + +// SetDefault changes the logger to be returned by Default()and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to setup +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. +func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod index 0d079a65..b6698c08 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.mod +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -2,6 +2,11 @@ module github.com/hashicorp/go-hclog require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 + github.com/mattn/go-colorable v0.1.4 + github.com/mattn/go-isatty v0.0.10 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 ) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum index e03ee77d..3a656dfd 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.sum +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -1,6 +1,18 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go deleted file mode 100644 index 2aaa1f89..00000000 --- a/vendor/github.com/hashicorp/go-hclog/int.go +++ /dev/null @@ -1,507 +0,0 @@ -package hclog - -import ( - "bufio" - "bytes" - "encoding" - "encoding/json" - "fmt" - "log" - "os" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -var ( - _levelToBracket = map[Level]string{ - Debug: "[DEBUG]", - Trace: "[TRACE]", - Info: "[INFO] ", - Warn: "[WARN] ", - Error: "[ERROR]", - } -) - -// Given the options (nil for defaults), create a new Logger -func New(opts *LoggerOptions) Logger { - if opts == nil { - opts = &LoggerOptions{} - } - - output := opts.Output - if output == nil { - output = os.Stderr - } - - level := opts.Level - if level == NoLevel { - level = DefaultLevel - } - - mtx := opts.Mutex - if mtx == nil { - mtx = new(sync.Mutex) - } - - ret := &intLogger{ - m: mtx, - json: opts.JSONFormat, - caller: opts.IncludeLocation, - name: opts.Name, - timeFormat: TimeFormat, - w: bufio.NewWriter(output), - level: new(int32), - } - if opts.TimeFormat != "" { - ret.timeFormat = opts.TimeFormat - } - atomic.StoreInt32(ret.level, int32(level)) - return ret -} - -// The internal logger implementation. Internal in that it is defined entirely -// by this package. -type intLogger struct { - json bool - caller bool - name string - timeFormat string - - // this is a pointer so that it's shared by any derived loggers, since - // those derived loggers share the bufio.Writer as well. - m *sync.Mutex - w *bufio.Writer - level *int32 - - implied []interface{} -} - -// Make sure that intLogger is a Logger -var _ Logger = &intLogger{} - -// The time format to use for logging. This is a version of RFC3339 that -// contains millisecond precision -const TimeFormat = "2006-01-02T15:04:05.000Z0700" - -// Log a message and a set of key/value pairs if the given level is at -// or more severe that the threshold configured in the Logger. -func (z *intLogger) Log(level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(z.level)) { - return - } - - t := time.Now() - - z.m.Lock() - defer z.m.Unlock() - - if z.json { - z.logJson(t, level, msg, args...) - } else { - z.log(t, level, msg, args...) - } - - z.w.Flush() -} - -// Cleanup a path by returning the last 2 segments of the path only. -func trimCallerPath(path string) string { - // lovely borrowed from zap - // nb. To make sure we trim the path correctly on Windows too, we - // counter-intuitively need to use '/' and *not* os.PathSeparator here, - // because the path given originates from Go stdlib, specifically - // runtime.Caller() which (as of Mar/17) returns forward slashes even on - // Windows. - // - // See https://github.com/golang/go/issues/3335 - // and https://github.com/golang/go/issues/18151 - // - // for discussion on the issue on Go side. - // - - // Find the last separator. 
- // - idx := strings.LastIndexByte(path, '/') - if idx == -1 { - return path - } - - // Find the penultimate separator. - idx = strings.LastIndexByte(path[:idx], '/') - if idx == -1 { - return path - } - - return path[idx+1:] -} - -// Non-JSON logging format function -func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - z.w.WriteString(t.Format(z.timeFormat)) - z.w.WriteByte(' ') - - s, ok := _levelToBracket[level] - if ok { - z.w.WriteString(s) - } else { - z.w.WriteString("[?????]") - } - - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - z.w.WriteByte(' ') - z.w.WriteString(trimCallerPath(file)) - z.w.WriteByte(':') - z.w.WriteString(strconv.Itoa(line)) - z.w.WriteByte(':') - } - } - - z.w.WriteByte(' ') - - if z.name != "" { - z.w.WriteString(z.name) - z.w.WriteString(": ") - } - - z.w.WriteString(msg) - - args = append(z.implied, args...) - - var stacktrace CapturedStacktrace - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - cs, ok := args[len(args)-1].(CapturedStacktrace) - if ok { - args = args[:len(args)-1] - stacktrace = cs - } else { - args = append(args, "") - } - } - - z.w.WriteByte(':') - - FOR: - for i := 0; i < len(args); i = i + 2 { - var ( - val string - raw bool - ) - - switch st := args[i+1].(type) { - case string: - val = st - case int: - val = strconv.FormatInt(int64(st), 10) - case int64: - val = strconv.FormatInt(int64(st), 10) - case int32: - val = strconv.FormatInt(int64(st), 10) - case int16: - val = strconv.FormatInt(int64(st), 10) - case int8: - val = strconv.FormatInt(int64(st), 10) - case uint: - val = strconv.FormatUint(uint64(st), 10) - case uint64: - val = strconv.FormatUint(uint64(st), 10) - case uint32: - val = strconv.FormatUint(uint64(st), 10) - case uint16: - val = strconv.FormatUint(uint64(st), 10) - case uint8: - val = strconv.FormatUint(uint64(st), 10) - case CapturedStacktrace: - stacktrace = st - continue FOR - case Format: - val = fmt.Sprintf(st[0].(string), st[1:]...) 
- default: - v := reflect.ValueOf(st) - if v.Kind() == reflect.Slice { - val = z.renderSlice(v) - raw = true - } else { - val = fmt.Sprintf("%v", st) - } - } - - z.w.WriteByte(' ') - z.w.WriteString(args[i].(string)) - z.w.WriteByte('=') - - if !raw && strings.ContainsAny(val, " \t\n\r") { - z.w.WriteByte('"') - z.w.WriteString(val) - z.w.WriteByte('"') - } else { - z.w.WriteString(val) - } - } - } - - z.w.WriteString("\n") - - if stacktrace != "" { - z.w.WriteString(string(stacktrace)) - } -} - -func (z *intLogger) renderSlice(v reflect.Value) string { - var buf bytes.Buffer - - buf.WriteRune('[') - - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteString(", ") - } - - sv := v.Index(i) - - var val string - - switch sv.Kind() { - case reflect.String: - val = sv.String() - case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: - val = strconv.FormatInt(sv.Int(), 10) - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - val = strconv.FormatUint(sv.Uint(), 10) - default: - val = fmt.Sprintf("%v", sv.Interface()) - } - - if strings.ContainsAny(val, " \t\n\r") { - buf.WriteByte('"') - buf.WriteString(val) - buf.WriteByte('"') - } else { - buf.WriteString(val) - } - } - - buf.WriteRune(']') - - return buf.String() -} - -// JSON logging function -func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { - vals := map[string]interface{}{ - "@message": msg, - "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), - } - - var levelStr string - switch level { - case Error: - levelStr = "error" - case Warn: - levelStr = "warn" - case Info: - levelStr = "info" - case Debug: - levelStr = "debug" - case Trace: - levelStr = "trace" - default: - levelStr = "all" - } - - vals["@level"] = levelStr - - if z.name != "" { - vals["@module"] = z.name - } - - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - vals["@caller"] = fmt.Sprintf("%s:%d", file, line) - } - } - - args = append(z.implied, args...) - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - cs, ok := args[len(args)-1].(CapturedStacktrace) - if ok { - args = args[:len(args)-1] - vals["stacktrace"] = cs - } else { - args = append(args, "") - } - } - - for i := 0; i < len(args); i = i + 2 { - if _, ok := args[i].(string); !ok { - // As this is the logging function not much we can do here - // without injecting into logs... - continue - } - val := args[i+1] - switch sv := val.(type) { - case error: - // Check if val is of type error. If error type doesn't - // implement json.Marshaler or encoding.TextMarshaler - // then set val to err.Error() so that it gets marshaled - switch sv.(type) { - case json.Marshaler, encoding.TextMarshaler: - default: - val = sv.Error() - } - case Format: - val = fmt.Sprintf(sv[0].(string), sv[1:]...) - } - - vals[args[i].(string)] = val - } - } - - err := json.NewEncoder(z.w).Encode(vals) - if err != nil { - panic(err) - } -} - -// Emit the message and args at DEBUG level -func (z *intLogger) Debug(msg string, args ...interface{}) { - z.Log(Debug, msg, args...) -} - -// Emit the message and args at TRACE level -func (z *intLogger) Trace(msg string, args ...interface{}) { - z.Log(Trace, msg, args...) -} - -// Emit the message and args at INFO level -func (z *intLogger) Info(msg string, args ...interface{}) { - z.Log(Info, msg, args...) -} - -// Emit the message and args at WARN level -func (z *intLogger) Warn(msg string, args ...interface{}) { - z.Log(Warn, msg, args...) 
-} - -// Emit the message and args at ERROR level -func (z *intLogger) Error(msg string, args ...interface{}) { - z.Log(Error, msg, args...) -} - -// Indicate that the logger would emit TRACE level logs -func (z *intLogger) IsTrace() bool { - return Level(atomic.LoadInt32(z.level)) == Trace -} - -// Indicate that the logger would emit DEBUG level logs -func (z *intLogger) IsDebug() bool { - return Level(atomic.LoadInt32(z.level)) <= Debug -} - -// Indicate that the logger would emit INFO level logs -func (z *intLogger) IsInfo() bool { - return Level(atomic.LoadInt32(z.level)) <= Info -} - -// Indicate that the logger would emit WARN level logs -func (z *intLogger) IsWarn() bool { - return Level(atomic.LoadInt32(z.level)) <= Warn -} - -// Indicate that the logger would emit ERROR level logs -func (z *intLogger) IsError() bool { - return Level(atomic.LoadInt32(z.level)) <= Error -} - -// Return a sub-Logger for which every emitted log message will contain -// the given key/value pairs. This is used to create a context specific -// Logger. -func (z *intLogger) With(args ...interface{}) Logger { - if len(args)%2 != 0 { - panic("With() call requires paired arguments") - } - - var nz intLogger = *z - - result := make(map[string]interface{}, len(z.implied)+len(args)) - keys := make([]string, 0, len(z.implied)+len(args)) - - // Read existing args, store map and key for consistent sorting - for i := 0; i < len(z.implied); i += 2 { - key := z.implied[i].(string) - keys = append(keys, key) - result[key] = z.implied[i+1] - } - // Read new args, store map and key for consistent sorting - for i := 0; i < len(args); i += 2 { - key := args[i].(string) - _, exists := result[key] - if !exists { - keys = append(keys, key) - } - result[key] = args[i+1] - } - - // Sort keys to be consistent - sort.Strings(keys) - - nz.implied = make([]interface{}, 0, len(z.implied)+len(args)) - for _, k := range keys { - nz.implied = append(nz.implied, k) - nz.implied = append(nz.implied, result[k]) - } - - return &nz -} - -// Create a new sub-Logger that a name decending from the current name. -// This is used to create a subsystem specific Logger. -func (z *intLogger) Named(name string) Logger { - var nz intLogger = *z - - if nz.name != "" { - nz.name = nz.name + "." + name - } else { - nz.name = name - } - - return &nz -} - -// Create a new sub-Logger with an explicit name. This ignores the current -// name. This is used to create a standalone logger that doesn't fall -// within the normal hierarchy. -func (z *intLogger) ResetNamed(name string) Logger { - var nz intLogger = *z - - nz.name = name - - return &nz -} - -// Update the logging level on-the-fly. This will affect all subloggers as -// well. -func (z *intLogger) SetLevel(level Level) { - atomic.StoreInt32(z.level, int32(level)) -} - -// Create a *log.Logger that will send it's data through this Logger. This -// allows packages that expect to be using the standard library log to actually -// use this logger. 
-func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { - if opts == nil { - opts = &StandardLoggerOptions{} - } - - return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0) -} diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 00000000..d8e2e76f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,235 @@ +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + mu *sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + mu: new(sync.Mutex), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.Logger.Log(level, msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at TRACE level to log and sinks +func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) + } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. 
+func (i *interceptLogger) Named(name string) Logger { + return i.NamedIntercept(name) +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + return i.ResetNamedIntercept(name) +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + sub.Logger = i.Logger.Named(name) + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + sub.Logger = i.Logger.ResetNamed(name) + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to interceptLoggers sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. 
+func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + return i.StandardLogger(opts) +} + +func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(i.StandardWriter(opts), "", 0) +} + +func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return i.StandardWriter(opts) +} + +func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: i, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutput(opts) + } else { + return nil + } +} + +func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutputWithFlush(opts, flushable) + } else { + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 00000000..f961ed91 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,679 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/fatih/color" +) + +// TimeFormat to use for logging. This is a version of RFC3339 that contains +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is an interface so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex Locker + writer *writer + level *int32 + + implied []interface{} + + exclude func(level Level, msg string, args ...interface{}) bool + + // create subloggers with their own level setting + independentLevels bool +} + +// New returns a configured logger. 
+func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + return newLogger(opts) +} + +func newLogger(opts *LoggerOptions) *intLogger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + l := &intLogger{ + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output, opts.Color), + level: new(int32), + exclude: opts.Exclude, + independentLevels: opts.IndependentLevels, + } + + l.setColorization(opts) + + if opts.DisableTime { + l.timeFormat = "" + } else if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := time.Now() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.exclude != nil && l.exclude(level, msg, args...) { + return + } + + if l.json { + l.logJSON(t, name, level, msg, args...) + } else { + l.logPlain(t, name, level, msg, args...) + } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + if len(l.timeFormat) > 0 { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + } + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + offset := 3 + if l.caller { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + if strings.HasSuffix(file, "intlogger.go") || strings.HasSuffix(file, "interceptlogger.go") { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if name != "" { + l.writer.WriteString(name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) 
+ + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case Hex: + val = "0x" + strconv.FormatUint(uint64(st), 16) + case Octal: + val = "0" + strconv.FormatUint(uint64(st), 8) + case Binary: + val = "0b" + strconv.FormatUint(uint64(st), 2) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) + default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + switch st := args[i].(type) { + case string: + l.writer.WriteString(st) + default: + l.writer.WriteString(fmt.Sprintf("%s", st)) + } + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + l.writer.WriteString("\n") + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + for i := 0; i < len(args); i = i + 2 { + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. 
If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, name, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if name != "" { + vals["@module"] = name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.log(l.Name(), Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.log(l.Name(), Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.log(l.Name(), Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.log(l.Name(), Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.log(l.Name(), Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +const MissingKey = "EXTRA_VALUE_AT_END" + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. 
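For orientation, a minimal sketch of how the JSON path above is typically driven, assuming only the vendored go-hclog API shown in this file; the logger name and key/value pairs are illustrative. The `@level`/`@message` fields and the `EXTRA_VALUE_AT_END` key come from `logJSON`, `jsonMapEntry`, and `MissingKey` above.

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	// JSONFormat routes log() through logJSON above: each entry becomes a
	// JSON object carrying @timestamp, @level, @message and (if named) @module.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "demo",
		Level:      hclog.Debug,
		JSONFormat: true,
	})

	// Even-length args become key/value members of that object.
	logger.Info("created role", "role", "example-role", "attempt", 1)

	// An odd trailing value is kept under MissingKey ("EXTRA_VALUE_AT_END").
	logger.Debug("dangling value", "orphan")
}
```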
+func (l *intLogger) With(args ...interface{}) Logger { + var extra interface{} + + if len(args)%2 != 0 { + extra = args[len(args)-1] + args = args[:len(args)-1] + } + + sl := l.copy() + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + if extra != nil { + sl.implied = append(sl.implied, MissingKey, extra) + } + + return sl +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := l.copy() + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return sl +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := l.copy() + + sl.name = name + + return sl +} + +func (l *intLogger) ResetOutput(opts *LoggerOptions) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.resetOutput(opts) +} + +func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + if flushable == nil { + return errors.New("flushable is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + if err := flushable.Flush(); err != nil { + return err + } + + return l.resetOutput(opts) +} + +func (l *intLogger) resetOutput(opts *LoggerOptions) error { + l.writer = newWriter(opts.Output, opts.Color) + l.setColorization(opts) + return nil +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. 
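A short sketch of the sub-logger behavior implemented by `With`, `Named`, and `SetLevel` above, assuming the same vendored API; the names and keys are illustrative.

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "provider", Level: hclog.Info})

	// With returns a sub-logger whose implied key/value pairs are sorted and
	// prepended to every later message (see With above).
	reqLog := root.With("request_id", "abc123")

	// Named appends to the dotted name; ResetNamed would replace it outright.
	apiLog := reqLog.Named("api")
	apiLog.Info("calling backend") // emitted under the name "provider.api"

	// The level pointer is shared unless IndependentLevels was set (the copy
	// helper further down replaces it only in that case), so this change is
	// visible to reqLog and apiLog as well.
	root.SetLevel(hclog.Debug)
	apiLog.Debug("now visible")
}
```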
+func (l *intLogger) checkWriterIsFile() *os.File { + fi, ok := l.writer.w.(*os.File) + if !ok { + panic("Cannot enable coloring of non-file Writers") + } + return fi +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.log(name, level, msg, args...) +} + +// ImpliedArgs returns the loggers implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the loggers name +func (i *intLogger) Name() string { + return i.name +} + +// copy returns a shallow copy of the intLogger, replacing the level pointer +// when necessary +func (l *intLogger) copy() *intLogger { + sl := *l + + if l.independentLevels { + sl.level = new(int32) + *sl.level = *l.level + } + + return &sl +} diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go deleted file mode 100644 index d98714e0..00000000 --- a/vendor/github.com/hashicorp/go-hclog/log.go +++ /dev/null @@ -1,161 +0,0 @@ -package hclog - -import ( - "io" - "log" - "os" - "strings" - "sync" -) - -var ( - DefaultOutput = os.Stderr - DefaultLevel = Info -) - -type Level int32 - -const ( - // This is a special level used to indicate that no level has been - // set and allow for a default to be used. - NoLevel Level = 0 - - // The most verbose level. Intended to be used for the tracing of actions - // in code, such as function enters/exits, etc. - Trace Level = 1 - - // For programmer lowlevel analysis. - Debug Level = 2 - - // For information about steady state operations. - Info Level = 3 - - // For information about rare but handled events. - Warn Level = 4 - - // For information about unrecoverable events. - Error Level = 5 -) - -// When processing a value of this type, the logger automatically treats the first -// argument as a Printf formatting string and passes the rest as the values to be -// formatted. For example: L.Info(Fmt{"%d beans/day", beans}). This is a simple -// convience type for when formatting is required. -type Format []interface{} - -// Fmt returns a Format type. This is a convience function for creating a Format -// type. -func Fmt(str string, args ...interface{}) Format { - return append(Format{str}, args...) -} - -// LevelFromString returns a Level type for the named log level, or "NoLevel" if -// the level string is invalid. This facilitates setting the log level via -// config or environment variable by name in a predictable way. -func LevelFromString(levelStr string) Level { - // We don't care about case. Accept "INFO" or "info" - levelStr = strings.ToLower(strings.TrimSpace(levelStr)) - switch levelStr { - case "trace": - return Trace - case "debug": - return Debug - case "info": - return Info - case "warn": - return Warn - case "error": - return Error - default: - return NoLevel - } -} - -// The main Logger interface. All code should code against this interface only. 
-type Logger interface { - // Args are alternating key, val pairs - // keys must be strings - // vals can be any type, but display is implementation specific - // Emit a message and key/value pairs at the TRACE level - Trace(msg string, args ...interface{}) - - // Emit a message and key/value pairs at the DEBUG level - Debug(msg string, args ...interface{}) - - // Emit a message and key/value pairs at the INFO level - Info(msg string, args ...interface{}) - - // Emit a message and key/value pairs at the WARN level - Warn(msg string, args ...interface{}) - - // Emit a message and key/value pairs at the ERROR level - Error(msg string, args ...interface{}) - - // Indicate if TRACE logs would be emitted. This and the other Is* guards - // are used to elide expensive logging code based on the current level. - IsTrace() bool - - // Indicate if DEBUG logs would be emitted. This and the other Is* guards - IsDebug() bool - - // Indicate if INFO logs would be emitted. This and the other Is* guards - IsInfo() bool - - // Indicate if WARN logs would be emitted. This and the other Is* guards - IsWarn() bool - - // Indicate if ERROR logs would be emitted. This and the other Is* guards - IsError() bool - - // Creates a sublogger that will always have the given key/value pairs - With(args ...interface{}) Logger - - // Create a logger that will prepend the name string on the front of all messages. - // If the logger already has a name, the new value will be appended to the current - // name. That way, a major subsystem can use this to decorate all it's own logs - // without losing context. - Named(name string) Logger - - // Create a logger that will prepend the name string on the front of all messages. - // This sets the name of the logger to the value directly, unlike Named which honor - // the current name as well. - ResetNamed(name string) Logger - - // Updates the level. This should affect all sub-loggers as well. If an - // implementation cannot update the level on the fly, it should no-op. - SetLevel(level Level) - - // Return a value that conforms to the stdlib log.Logger interface - StandardLogger(opts *StandardLoggerOptions) *log.Logger -} - -type StandardLoggerOptions struct { - // Indicate that some minimal parsing should be done on strings to try - // and detect their level and re-emit them. - // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], - // [DEBUG] and strip it off before reapplying it. - InferLevels bool -} - -type LoggerOptions struct { - // Name of the subsystem to prefix logs with - Name string - - // The threshold for the logger. Anything less severe is supressed - Level Level - - // Where to write the logs to. Defaults to os.Stderr if nil - Output io.Writer - - // An optional mutex pointer in case Output is shared - Mutex *sync.Mutex - - // Control if the output should be in JSON. - JSONFormat bool - - // Include file and line information in each log line - IncludeLocation bool - - // The time format to use instead of the default - TimeFormat string -} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go new file mode 100644 index 00000000..83eafc15 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -0,0 +1,341 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" +) + +var ( + //DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. 
+ DefaultLevel = Info +) + +// Level represents a log level. +type Level int32 + +const ( + // NoLevel is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. + Trace Level = 1 + + // Debug information for programmer lowlevel analysis. + Debug Level = 2 + + // Info information about steady state operations. + Info Level = 3 + + // Warn information about rare but handled events. + Warn Level = 4 + + // Error information about unrecoverable events. + Error Level = 5 + + // Off disables all logging output. + Off Level = 6 +) + +// Format is a simple convience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). +type Format []interface{} + +// Fmt returns a Format type. This is a convience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// A simple shortcut to format numbers in hex when displayed with the normal +// text output. For example: L.Info("header value", Hex(17)) +type Hex int + +// A simple shortcut to format numbers in octal when displayed with the normal +// text output. For example: L.Info("perms", Octal(17)) +type Octal int + +// A simple shortcut to format numbers in binary when displayed with the normal +// text output. For example: L.Info("bits", Binary(17)) +type Binary int + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept both "INFO" and "info". + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + case "off": + return Off + default: + return NoLevel + } +} + +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case NoLevel: + return "none" + case Off: + return "off" + default: + return "unknown" + } +} + +// Logger describes the interface that must be implemeted by all loggers. 
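A hedged sketch of the helper types declared above (`LevelFromString`, `Fmt`, `Hex`); the environment variable name and values are illustrative, not part of this provider.

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// LevelFromString accepts the names above in any case ("info", "OFF", ...)
	// and returns NoLevel for anything else, so an unset or bad LOG_LEVEL
	// simply falls back to the package default.
	level := hclog.LevelFromString(os.Getenv("LOG_LEVEL"))

	logger := hclog.New(&hclog.LoggerOptions{Name: "demo", Level: level})

	// Fmt defers Printf-style formatting to the logger; Hex, Octal and Binary
	// render integer values in the corresponding base in the text output.
	logger.Info("retrying", "backoff", hclog.Fmt("%dms", 250), "flags", hclog.Hex(0x2f))
}
```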
+type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // ImpliedArgs returns With key/value pairs + ImpliedArgs() []interface{} + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Returns the Name of the logger + Name() string + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamed(name string) Logger + + // Updates the level. This should affect all related loggers as well, + // unless they were created with IndependentLevels. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriter(opts *StandardLoggerOptions) io.Writer +} + +// StandardLoggerOptions can be used to configure a new standard logger. +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + InferLevels bool + + // ForceLevel is used to force all output from the standard logger to be at + // the specified level. Similar to InferLevels, this will strip any level + // prefix contained in the logged string before applying the forced level. + // If set, this override InferLevels. + ForceLevel Level +} + +// LoggerOptions can be used to configure a new logger. +type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. 
Anything less severe is supressed + Level Level + + // Where to write the logs to. Defaults to os.Stderr if nil + Output io.Writer + + // An optional Locker in case Output is shared. This can be a sync.Mutex or + // a NoopLocker if the caller wants control over output, e.g. for batching + // log lines. + Mutex Locker + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string + + // Control whether or not to display the time at all. This is required + // because setting TimeFormat to empty assumes the default format. + DisableTime bool + + // Color the output. On Windows, colored logs are only avaiable for io.Writers that + // are concretely instances of *os.File. + Color ColorOption + + // A function which is called with the log information and if it returns true the value + // should not be logged. + // This is useful when interacting with a system that you wish to suppress the log + // message for (because it's too noisy, etc) + Exclude func(level Level, msg string, args ...interface{}) bool + + // IndependentLevels causes subloggers to be created with an independent + // copy of this logger's level. This means that using SetLevel on this + // logger will not effect any subloggers, and SetLevel on any subloggers + // will not effect the parent or sibling loggers. + IndependentLevels bool +} + +// InterceptLogger describes the interface for using a logger +// that can register different output sinks. +// This is useful for sending lower level log messages +// to a different output while keeping the root logger +// at a higher one. +type InterceptLogger interface { + // Logger is the root logger for an InterceptLogger + Logger + + // RegisterSink adds a SinkAdapter to the InterceptLogger + RegisterSink(sink SinkAdapter) + + // DeregisterSink removes a SinkAdapter from the InterceptLogger + DeregisterSink(sink SinkAdapter) + + // Create a interceptlogger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + NamedIntercept(name string) InterceptLogger + + // Create a interceptlogger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamedIntercept(name string) InterceptLogger + + // Deprecated: use StandardLogger + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Deprecated: use StandardWriter + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer +} + +// SinkAdapter describes the interface that must be implemented +// in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) +} + +// Flushable represents a method for flushing an output buffer. It can be used +// if Resetting the log to use a new output, in order to flush the writes to +// the existing output beforehand. +type Flushable interface { + Flush() error +} + +// OutputResettable provides ways to swap the output in use at runtime +type OutputResettable interface { + // ResetOutput swaps the current output writer with the one given in the + // opts. Color options given in opts will be used for the new output. 
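A sketch tying together the `LoggerOptions` fields documented above; the `Exclude` predicate and option values are illustrative assumptions, not the provider's actual configuration.

```go
package main

import (
	"strings"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:            "demo",
		Level:           hclog.Trace,
		IncludeLocation: true, // adds file:line via runtime.Caller
		DisableTime:     true, // needed because an empty TimeFormat alone means "use the default"

		// Exclude is consulted before anything is written; returning true drops the entry.
		Exclude: func(level hclog.Level, msg string, args ...interface{}) bool {
			return strings.Contains(msg, "keepalive")
		},

		// Sub-loggers copy this logger's level instead of sharing the pointer,
		// so a later SetLevel on the root will not touch them.
		IndependentLevels: true,
	})

	logger.Trace("keepalive tick") // suppressed by Exclude
	logger.Info("provider configured")
}
```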
+ ResetOutput(opts *LoggerOptions) error + + // ResetOutputWithFlush swaps the current output writer with the one given + // in the opts, first calling Flush on the given Flushable. Color options + // given in opts will be used for the new output. + ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error +} + +// Locker is used for locking output. If not set when creating a logger, a +// sync.Mutex will be used internally. +type Locker interface { + // Lock is called when the output is going to be changed or written to + Lock() + + // Unlock is called when the operation that called Lock() completes + Unlock() +} + +// NoopLocker implements locker but does nothing. This is useful if the client +// wants tight control over locking, in order to provide grouping of log +// entries or other functionality. +type NoopLocker struct{} + +// Lock does nothing +func (n NoopLocker) Lock() {} + +// Unlock does nothing +func (n NoopLocker) Unlock() {} + +var _ Locker = (*NoopLocker)(nil) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 0942361a..bc14f770 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -1,6 +1,7 @@ package hclog import ( + "io" "io/ioutil" "log" ) @@ -14,6 +15,8 @@ func NewNullLogger() Logger { type nullLogger struct{} +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + func (l *nullLogger) Trace(msg string, args ...interface{}) {} func (l *nullLogger) Debug(msg string, args ...interface{}) {} @@ -34,8 +37,12 @@ func (l *nullLogger) IsWarn() bool { return false } func (l *nullLogger) IsError() bool { return false } +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + func (l *nullLogger) With(args ...interface{}) Logger { return l } +func (l *nullLogger) Name() string { return "" } + func (l *nullLogger) Named(name string) Logger { return l } func (l *nullLogger) ResetNamed(name string) Logger { return l } @@ -43,5 +50,9 @@ func (l *nullLogger) ResetNamed(name string) Logger { return l } func (l *nullLogger) SetLevel(level Level) {} func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { - return log.New(ioutil.Discard, "", log.LstdFlags) + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard } diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go index 8af1a3be..9b27bd3d 100644 --- a/vendor/github.com/hashicorp/go-hclog/stacktrace.go +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -40,12 +40,13 @@ var ( } ) -// A stacktrace gathered by a previous call to log.Stacktrace. If passed -// to a logging function, the stacktrace will be appended. +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. type CapturedStacktrace string -// Gather a stacktrace of the current goroutine and return it to be passed -// to a logging function. +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
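A small sketch of the `CapturedStacktrace` convention described above, plus `NewNullLogger` from the same set of hunks; the error and messages are illustrative.

```go
package main

import (
	"errors"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "demo"})

	if err := errors.New("simulated failure"); err != nil {
		// A CapturedStacktrace value is pulled out of the key/value list and
		// appended after the log line instead of being rendered as key=value.
		logger.Error("operation failed", "err", err, "stacktrace", hclog.Stacktrace())
	}

	// NewNullLogger satisfies the same Logger interface but discards all
	// output, which makes it a convenient default in tests.
	quiet := hclog.NewNullLogger()
	quiet.Info("never emitted")
}
```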
func Stacktrace() CapturedStacktrace { return CapturedStacktrace(takeStacktrace()) } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 2bb927fc..f35d875d 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -2,6 +2,7 @@ package hclog import ( "bytes" + "log" "strings" ) @@ -9,39 +10,51 @@ import ( // and back into our Logger. This is basically the only way to // build upon *log.Logger. type stdlogAdapter struct { - hl Logger + log Logger inferLevels bool + forceLevel Level } // Take the data, infer the levels if configured, and send it through -// a regular Logger +// a regular Logger. func (s *stdlogAdapter) Write(data []byte) (int, error) { str := string(bytes.TrimRight(data, " \t\n")) - if s.inferLevels { + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + s.dispatch(str, s.forceLevel) + } else if s.inferLevels { level, str := s.pickLevel(str) - switch level { - case Trace: - s.hl.Trace(str) - case Debug: - s.hl.Debug(str) - case Info: - s.hl.Info(str) - case Warn: - s.hl.Warn(str) - case Error: - s.hl.Error(str) - default: - s.hl.Info(str) - } + s.dispatch(str, level) } else { - s.hl.Info(str) + s.log.Info(str) } return len(data), nil } -// Detect, based on conventions, what log level this is +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + +// Detect, based on conventions, what log level this is. 
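A sketch of the `stdlogAdapter` path above, routing a standard-library `*log.Logger` through hclog; the messages are illustrative.

```go
package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "demo", Level: hclog.Debug})

	// With InferLevels, each line written to the adapter is scanned for a
	// "[LEVEL]" prefix (pickLevel), the prefix is stripped, and the line is
	// re-emitted at that level.
	std := log.New(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}), "", 0)
	std.Println("[WARN] legacy code path hit") // re-emitted at WARN

	// ForceLevel strips any prefix and pins every line to one level instead.
	forced := logger.StandardLogger(&hclog.StandardLoggerOptions{ForceLevel: hclog.Debug})
	forced.Println("chatty dependency output") // always DEBUG
}
```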
func (s *stdlogAdapter) pickLevel(str string) (Level, string) { switch { case strings.HasPrefix(str, "[DEBUG]"): @@ -60,3 +73,23 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { return Info, str } } + +type logWriter struct { + l *log.Logger +} + +func (l *logWriter) Write(b []byte) (int, error) { + l.l.Println(string(bytes.TrimRight(b, " \n\t"))) + return len(b), nil +} + +// Takes a standard library logger and returns a Logger that will write to it +func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger { + var dl LoggerOptions = *opts + + // Use the time format that log.Logger uses + dl.DisableTime = true + dl.Output = &logWriter{l} + + return New(&dl) +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 00000000..421a1f06 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,82 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer + color ColorOption +} + +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} +} + +func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, unwritten) + } else { + _, err = w.w.Write(unwritten) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. 
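A sketch combining `NewLeveledWriter` and `FromStandardLogger` from the hunks above; the stdout/stderr split and the logger prefix are illustrative choices, not anything this diff configures.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// LeveledWriter implements LevelWrite, so the buffered writer's Flush
	// routes each entry by level: errors to stderr here, the rest to stdout.
	out := hclog.NewLeveledWriter(os.Stdout, map[hclog.Level]io.Writer{
		hclog.Error: os.Stderr,
	})
	logger := hclog.New(&hclog.LoggerOptions{Name: "demo", Output: out})
	logger.Info("goes to stdout")
	logger.Error("goes to stderr")

	// FromStandardLogger adapts an existing *log.Logger as the sink and
	// disables hclog's own timestamp, since log.Logger already prints one.
	wrapped := hclog.FromStandardLogger(log.New(os.Stdout, "plugin: ", log.LstdFlags), &hclog.LoggerOptions{Name: "wrapped"})
	wrapped.Info("via the standard library logger")
}
```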
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod index f0115b78..4e182e62 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.mod +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -4,7 +4,7 @@ go 1.13 require ( github.com/golang/protobuf v1.3.4 - github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb github.com/jhump/protoreflect v1.6.0 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum index 5d497615..56062044 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.sum +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -4,8 +4,12 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -17,12 +21,17 @@ github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= @@ -31,6 +40,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -52,6 +62,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go index 6231a9fd..a5821815 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -136,12 +136,12 @@ func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { status.Code(err) == codes.Canceled || status.Code(err) == codes.Unimplemented || err == context.Canceled { - c.log.Warn("received EOF, stopping recv loop", "err", err) + c.log.Debug("received EOF, stopping recv loop", "err", err) return } c.log.Error("error receiving data", "err", err) - continue + return } // Determine our output writer based on channel diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 002d6080..80f0ac39 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -9,7 +9,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "net" "os" "os/signal" @@ -260,9 +259,6 @@ func Serve(opts *ServeConfig) { // start with default version in the handshake config 
protoVersion, protoType, pluginSet := protocolVersion(opts) - // Logging goes to the original stderr - log.SetOutput(os.Stderr) - logger := opts.Logger if logger == nil { // internal logger to os.Stderr diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore deleted file mode 100644 index caab963a..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.idea/ -*.iml -*.test diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml deleted file mode 100644 index 2df4e7df..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.8.1 - -branches: - only: - - master - -script: make updatedeps test diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. 
"Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. 
Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile deleted file mode 100644 index da17640e..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -test: - go vet ./... - go test -race ./... - -updatedeps: - go get -f -t -u ./... - go get -f -u ./... 
- -.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md deleted file mode 100644 index ccdc7e87..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ /dev/null @@ -1,46 +0,0 @@ -go-retryablehttp -================ - -[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] - -[travis]: http://travis-ci.org/hashicorp/go-retryablehttp -[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp - -The `retryablehttp` package provides a familiar HTTP client interface with -automatic retries and exponential backoff. It is a thin wrapper over the -standard `net/http` client library and exposes nearly the same public API. This -makes `retryablehttp` very easy to drop into existing programs. - -`retryablehttp` performs automatic retries under certain conditions. Mainly, if -an error is returned by the client (connection errors, etc.), or if a 500-range -response code is received (except 501), then a retry is invoked after a wait -period. Otherwise, the response is returned and left to the caller to -interpret. - -The main difference from `net/http` is that requests which take a request body -(POST/PUT et. al) can have the body provided in a number of ways (some more or -less efficient) that allow "rewinding" the request body if the initial request -fails so that the full request can be attempted again. See the -[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more -details. - -Example Use -=========== - -Using this library should look almost identical to what you would do with -`net/http`. The most simple example of a GET request is shown below: - -```go -resp, err := retryablehttp.Get("/foo") -if err != nil { - panic(err) -} -``` - -The returned response object is an `*http.Response`, the same thing you would -usually get from `net/http`. Had the request failed one or more times, the above -call would block and retry with exponential backoff. - -For more usage and examples see the -[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go deleted file mode 100644 index 15f1e885..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ /dev/null @@ -1,528 +0,0 @@ -// The retryablehttp package provides a familiar HTTP client interface with -// automatic retries and exponential backoff. It is a thin wrapper over the -// standard net/http client library and exposes nearly the same public API. -// This makes retryablehttp very easy to drop into existing programs. -// -// retryablehttp performs automatic retries under certain conditions. Mainly, if -// an error is returned by the client (connection errors etc), or if a 500-range -// response is received, then a retry is invoked. Otherwise, the response is -// returned and left to the caller to interpret. -// -// Requests which take a request body should provide a non-nil function -// parameter. The best choice is to provide either a function satisfying -// ReaderFunc which provides multiple io.Readers in an efficient manner, a -// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte -// slice. 
As it is a reference type, and we will wrap it as needed by readers, -// we can efficiently re-use the request body without needing to copy it. If an -// io.Reader (such as a *bytes.Reader) is provided, the full body will be read -// prior to the first request, and will be efficiently re-used for any retries. -// ReadSeeker can be used, but some users have observed occasional data races -// between the net/http library and the Seek functionality of some -// implementations of ReadSeeker, so should be avoided if possible. -package retryablehttp - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "math/rand" - "net/http" - "net/url" - "os" - "strings" - "time" - - cleanhttp "github.com/hashicorp/go-cleanhttp" -) - -var ( - // Default retry configuration - defaultRetryWaitMin = 1 * time.Second - defaultRetryWaitMax = 30 * time.Second - defaultRetryMax = 4 - - // defaultClient is used for performing requests without explicitly making - // a new client. It is purposely private to avoid modifications. - defaultClient = NewClient() - - // We need to consume response bodies to maintain http connections, but - // limit the size we consume to respReadLimit. - respReadLimit = int64(4096) -) - -// ReaderFunc is the type of function that can be given natively to NewRequest -type ReaderFunc func() (io.Reader, error) - -// LenReader is an interface implemented by many in-memory io.Reader's. Used -// for automatically sending the right Content-Length header when possible. -type LenReader interface { - Len() int -} - -// Request wraps the metadata needed to create HTTP requests. -type Request struct { - // body is a seekable reader over the request body payload. This is - // used to rewind the request data in between retries. - body ReaderFunc - - // Embed an HTTP request directly. This makes a *Request act exactly - // like an *http.Request so that all meta methods are supported. - *http.Request -} - -// WithContext returns wrapped Request with a shallow copy of underlying *http.Request -// with its context changed to ctx. The provided ctx must be non-nil. -func (r *Request) WithContext(ctx context.Context) *Request { - r.Request = r.Request.WithContext(ctx) - return r -} - -// BodyBytes allows accessing the request body. It is an analogue to -// http.Request's Body variable, but it returns a copy of the underlying data -// rather than consuming it. -// -// This function is not thread-safe; do not call it at the same time as another -// call, or at the same time this request is being used with Client.Do. -func (r *Request) BodyBytes() ([]byte, error) { - if r.body == nil { - return nil, nil - } - body, err := r.body() - if err != nil { - return nil, err - } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(body) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// NewRequest creates a new wrapped request. -func NewRequest(method, url string, rawBody interface{}) (*Request, error) { - var err error - var body ReaderFunc - var contentLength int64 - - if rawBody != nil { - switch rawBody.(type) { - // If they gave us a function already, great! Use it. 
- case ReaderFunc: - body = rawBody.(ReaderFunc) - tmp, err := body() - if err != nil { - return nil, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - case func() (io.Reader, error): - body = rawBody.(func() (io.Reader, error)) - tmp, err := body() - if err != nil { - return nil, err - } - if lr, ok := tmp.(LenReader); ok { - contentLength = int64(lr.Len()) - } - if c, ok := tmp.(io.Closer); ok { - c.Close() - } - - // If a regular byte slice, we can read it over and over via new - // readers - case []byte: - buf := rawBody.([]byte) - body = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // If a bytes.Buffer we can read the underlying byte slice over and - // over - case *bytes.Buffer: - buf := rawBody.(*bytes.Buffer) - body = func() (io.Reader, error) { - return bytes.NewReader(buf.Bytes()), nil - } - contentLength = int64(buf.Len()) - - // We prioritize *bytes.Reader here because we don't really want to - // deal with it seeking so want it to match here instead of the - // io.ReadSeeker case. - case *bytes.Reader: - buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader)) - if err != nil { - return nil, err - } - body = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - // Compat case - case io.ReadSeeker: - raw := rawBody.(io.ReadSeeker) - body = func() (io.Reader, error) { - raw.Seek(0, 0) - return ioutil.NopCloser(raw), nil - } - if lr, ok := raw.(LenReader); ok { - contentLength = int64(lr.Len()) - } - - // Read all in so we can reset - case io.Reader: - buf, err := ioutil.ReadAll(rawBody.(io.Reader)) - if err != nil { - return nil, err - } - body = func() (io.Reader, error) { - return bytes.NewReader(buf), nil - } - contentLength = int64(len(buf)) - - default: - return nil, fmt.Errorf("cannot handle type %T", rawBody) - } - } - - httpReq, err := http.NewRequest(method, url, nil) - if err != nil { - return nil, err - } - httpReq.ContentLength = contentLength - - return &Request{body, httpReq}, nil -} - -// Logger interface allows to use other loggers than -// standard log.Logger. -type Logger interface { - Printf(string, ...interface{}) -} - -// RequestLogHook allows a function to run before each retry. The HTTP -// request which will be made, and the retry number (0 for the initial -// request) are available to users. The internal logger is exposed to -// consumers. -type RequestLogHook func(Logger, *http.Request, int) - -// ResponseLogHook is like RequestLogHook, but allows running a function -// on each HTTP response. This function will be invoked at the end of -// every HTTP request executed, regardless of whether a subsequent retry -// needs to be performed or not. If the response body is read or closed -// from this method, this will affect the response returned from Do(). -type ResponseLogHook func(Logger, *http.Response) - -// CheckRetry specifies a policy for handling retries. It is called -// following each request with the response and error values returned by -// the http.Client. If CheckRetry returns false, the Client stops retrying -// and returns the response to the caller. If CheckRetry returns an error, -// that error value is returned in lieu of the error from the request. The -// Client will close any response body when retrying, but if the retry is -// aborted it is up to the CheckResponse callback to properly close any -// response body before returning. 
-type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) - -// Backoff specifies a policy for how long to wait between retries. -// It is called after a failing request to determine the amount of time -// that should pass before trying again. -type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration - -// ErrorHandler is called if retries are expired, containing the last status -// from the http library. If not specified, default behavior for the library is -// to close the body and return an error indicating how many tries were -// attempted. If overriding this, be sure to close the body if needed. -type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) - -// Client is used to make HTTP requests. It adds additional functionality -// like automatic retries to tolerate minor outages. -type Client struct { - HTTPClient *http.Client // Internal HTTP client. - Logger Logger // Customer logger instance. - - RetryWaitMin time.Duration // Minimum time to wait - RetryWaitMax time.Duration // Maximum time to wait - RetryMax int // Maximum number of retries - - // RequestLogHook allows a user-supplied function to be called - // before each retry. - RequestLogHook RequestLogHook - - // ResponseLogHook allows a user-supplied function to be called - // with the response from each HTTP request executed. - ResponseLogHook ResponseLogHook - - // CheckRetry specifies the policy for handling retries, and is called - // after each request. The default policy is DefaultRetryPolicy. - CheckRetry CheckRetry - - // Backoff specifies the policy for how long to wait between retries - Backoff Backoff - - // ErrorHandler specifies the custom error handler to use, if any - ErrorHandler ErrorHandler -} - -// NewClient creates a new Client with default settings. -func NewClient() *Client { - return &Client{ - HTTPClient: cleanhttp.DefaultClient(), - Logger: log.New(os.Stderr, "", log.LstdFlags), - RetryWaitMin: defaultRetryWaitMin, - RetryWaitMax: defaultRetryWaitMax, - RetryMax: defaultRetryMax, - CheckRetry: DefaultRetryPolicy, - Backoff: DefaultBackoff, - } -} - -// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which -// will retry on connection errors and server errors. -func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { - // do not retry on context.Canceled or context.DeadlineExceeded - if ctx.Err() != nil { - return false, ctx.Err() - } - - if err != nil { - return true, err - } - // Check the response code. We retry on 500-range responses to allow - // the server time to recover, as 500's are typically not permanent - // errors and may relate to outages on the server side. This will catch - // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { - return true, nil - } - - return false, nil -} - -// DefaultBackoff provides a default callback for Client.Backoff which -// will perform exponential backoff based on the attempt number and limited -// by the provided minimum and maximum durations. 
-func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - mult := math.Pow(2, float64(attemptNum)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} - -// LinearJitterBackoff provides a callback for Client.Backoff which will -// perform linear backoff based on the attempt number and with jitter to -// prevent a thundering herd. -// -// min and max here are *not* absolute values. The number to be multipled by -// the attempt number will be chosen at random from between them, thus they are -// bounding the jitter. -// -// For instance: -// * To get strictly linear backoff of one second increasing each retry, set -// both to one second (1s, 2s, 3s, 4s, ...) -// * To get a small amount of jitter centered around one second increasing each -// retry, set to around one second, such as a min of 800ms and max of 1200ms -// (892ms, 2102ms, 2945ms, 4312ms, ...) -// * To get extreme jitter, set to a very wide spread, such as a min of 100ms -// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) -func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { - // attemptNum always starts at zero but we want to start at 1 for multiplication - attemptNum++ - - if max <= min { - // Unclear what to do here, or they are the same, so return min * - // attemptNum - return min * time.Duration(attemptNum) - } - - // Seed rand; doing this every time is fine - rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - - // Pick a random number that lies somewhere between the min and max and - // multiply by the attemptNum. attemptNum starts at zero so we always - // increment here. We first get a random percentage, then apply that to the - // difference between min and max, and add to min. - jitter := rand.Float64() * float64(max-min) - jitterMin := int64(jitter) + int64(min) - return time.Duration(jitterMin * int64(attemptNum)) -} - -// PassthroughErrorHandler is an ErrorHandler that directly passes through the -// values from the net/http library for the final request. The body is not -// closed. -func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { - return resp, err -} - -// Do wraps calling an HTTP method with retries. -func (c *Client) Do(req *Request) (*http.Response, error) { - if c.Logger != nil { - c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) - } - - var resp *http.Response - var err error - - for i := 0; ; i++ { - var code int // HTTP response code - - // Always rewind the request body when non-nil. - if req.body != nil { - body, err := req.body() - if err != nil { - return resp, err - } - if c, ok := body.(io.ReadCloser); ok { - req.Request.Body = c - } else { - req.Request.Body = ioutil.NopCloser(body) - } - } - - if c.RequestLogHook != nil { - c.RequestLogHook(c.Logger, req.Request, i) - } - - // Attempt the request - resp, err = c.HTTPClient.Do(req.Request) - if resp != nil { - code = resp.StatusCode - } - - // Check if we should continue with retries. - checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err) - - if err != nil { - if c.Logger != nil { - c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) - } - } else { - // Call this here to maintain the behavior of logging all requests, - // even if CheckRetry signals to stop. - if c.ResponseLogHook != nil { - // Call the response logger function if provided. 
- c.ResponseLogHook(c.Logger, resp) - } - } - - // Now decide if we should continue. - if !checkOK { - if checkErr != nil { - err = checkErr - } - return resp, err - } - - // We do this before drainBody beause there's no need for the I/O if - // we're breaking out - remain := c.RetryMax - i - if remain <= 0 { - break - } - - // We're going to retry, consume any response to reuse the connection. - if err == nil && resp != nil { - c.drainBody(resp.Body) - } - - wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) - desc := fmt.Sprintf("%s %s", req.Method, req.URL) - if code > 0 { - desc = fmt.Sprintf("%s (status: %d)", desc, code) - } - if c.Logger != nil { - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) - } - time.Sleep(wait) - } - - if c.ErrorHandler != nil { - return c.ErrorHandler(resp, err, c.RetryMax+1) - } - - // By default, we close the response body and return an error without - // returning the response - if resp != nil { - resp.Body.Close() - } - return nil, fmt.Errorf("%s %s giving up after %d attempts", - req.Method, req.URL, c.RetryMax+1) -} - -// Try to read the response body so we can reuse this connection. -func (c *Client) drainBody(body io.ReadCloser) { - defer body.Close() - _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) - if err != nil { - if c.Logger != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) - } - } -} - -// Get is a shortcut for doing a GET request without making a new client. -func Get(url string) (*http.Response, error) { - return defaultClient.Get(url) -} - -// Get is a convenience helper for doing simple GET requests. -func (c *Client) Get(url string) (*http.Response, error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Head is a shortcut for doing a HEAD request without making a new client. -func Head(url string) (*http.Response, error) { - return defaultClient.Head(url) -} - -// Head is a convenience method for doing simple HEAD requests. -func (c *Client) Head(url string) (*http.Response, error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body interface{}) (*http.Response, error) { - return defaultClient.Post(url, bodyType, body) -} - -// Post is a convenience method for doing simple POST requests. -func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return c.Do(req) -} - -// PostForm is a shortcut to perform a POST with form data without creating -// a new client. -func PostForm(url string, data url.Values) (*http.Response, error) { - return defaultClient.PostForm(url, data) -} - -// PostForm is a convenience method for doing simple POST operations using -// pre-filled url.Values form data. 
-func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod deleted file mode 100644 index d28c8c8e..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/hashicorp/go-retryablehttp - -require github.com/hashicorp/go-cleanhttp v0.5.0 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum deleted file mode 100644 index 3ed0fd98..00000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml deleted file mode 100644 index 01c5dc21..00000000 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - - 1.4 - - 1.9 - - "1.10" - - 1.11 - - 1.12 - -script: - - go test diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 6f3a15ce..851a337b 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,6 @@ # Versioning Library for Go -[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 1032c560..09703e8e 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -280,6 +280,10 @@ func comparePrereleases(v string, other string) int { // Equal tests if two versions are equal. func (v *Version) Equal(o *Version) bool { + if v == nil || o == nil { + return v == o + } + return v.Compare(o) == 0 } @@ -288,7 +292,7 @@ func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } -// GreaterThanOrEqualTo tests if this version is greater than or equal to another version. +// GreaterThanOrEqual tests if this version is greater than or equal to another version. func (v *Version) GreaterThanOrEqual(o *Version) bool { return v.Compare(o) >= 0 } @@ -298,7 +302,7 @@ func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } -// LessThanOrEqualTo tests if this version is less than or equal to another version. +// LessThanOrEqual tests if this version is less than or equal to another version. 
func (v *Version) LessThanOrEqual(o *Version) bool { return v.Compare(o) <= 0 } diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index be2cc4df..00000000 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 5673773b..00000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,161 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("Must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache. -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. 
-func (c *LRU) Add(key, value interface{}) (evicted bool) { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - return -} - -// Contains checks if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element - if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) (present bool) { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() -} - -// removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - if c.onEvict != nil { - c.onEvict(kv.key, kv.value) - } -} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go deleted file mode 100644 index 74c70774..00000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ /dev/null @@ -1,36 +0,0 @@ -package simplelru - -// LRUCache is the interface for simple LRU cache. -type LRUCache interface { - // Adds a value to the cache, returns true if an eviction occurred and - // updates the "recently used"-ness of the key. - Add(key, value interface{}) bool - - // Returns key's value from the cache and - // updates the "recently used"-ness of the key. 
#value, isFound - Get(key interface{}) (value interface{}, ok bool) - - // Check if a key exsists in cache without updating the recent-ness. - Contains(key interface{}) (ok bool) - - // Returns key's value without updating the "recently used"-ness of the key. - Peek(key interface{}) (value interface{}, ok bool) - - // Removes a key from the cache. - Remove(key interface{}) bool - - // Removes the oldest entry from cache. - RemoveOldest() (interface{}, interface{}, bool) - - // Returns the oldest entry from the cache. #key, value, isFound - GetOldest() (interface{}, interface{}, bool) - - // Returns a slice of the keys in the cache, from oldest to newest. - Keys() []interface{} - - // Returns the number of items in the cache. - Len() int - - // Clear all cache entries - Purge() -} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b..00000000 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index 3f83d902..00000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.8 - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f..00000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326..00000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. 
With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index 0b39c1b9..00000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,724 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. 
- if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. 
- testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. 
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. 
- if !field.CanSet() { - continue - } - - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - field.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, field); err != nil { - return err - } - } - - decodedFields = append(decodedFields, fieldType.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b5..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. -// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef654..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. 
-type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. 
-type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. -func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? 
- fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381d..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index b4881806..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. -func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. 
when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. - p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")) - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. 
this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. 
- if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. 
- comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 69662367..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,651 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // If we see a null character with data left, then that is an error - if ch == '\x00' && s.buf.Len() > 0 { - s.err("unexpected null character (0x00)") - return eof - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = 
string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. 
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664..00000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f07..00000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index dd5c72bb..00000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72..00000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3ee..00000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. -func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c29..00000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. 
-func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4..00000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md deleted file mode 100644 index f59ce92e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md +++ /dev/null @@ -1,184 +0,0 @@ -# HCL Dynamic Blocks Extension - -This HCL extension implements a special block type named "dynamic" that can -be used to dynamically generate blocks of other types by iterating over -collection values. - -Normally the block structure in an HCL configuration file is rigid, even -though dynamic expressions can be used within attribute values. This is -convenient for most applications since it allows the overall structure of -the document to be decoded easily, but in some applications it is desirable -to allow dynamic block generation within certain portions of the configuration. - -Dynamic block generation is performed using the `dynamic` block type: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - dynamic "nested" { - for_each = ["a", "b", "c"] - iterator = nested - content { - foo = "dynamic block ${nested.value}" - } - } - - nested { - foo = "static block 2" - } -} -``` - -The above is interpreted as if it were written as follows: - -```hcl -toplevel { - nested { - foo = "static block 1" - } - - nested { - foo = "dynamic block a" - } - - nested { - foo = "dynamic block b" - } - - nested { - foo = "dynamic block c" - } - - nested { - foo = "static block 2" - } -} -``` - -Since HCL block syntax is not normally exposed to the possibility of unknown -values, this extension must make some compromises when asked to iterate over -an unknown collection. If the length of the collection cannot be statically -recognized (because it is an unknown value of list, map, or set type) then -the `dynamic` construct will generate a _single_ dynamic block whose iterator -key and value are both unknown values of the dynamic pseudo-type, thus causing -any attribute values derived from iteration to appear as unknown values. 
There -is no explicit representation of the fact that the length of the collection may -eventually be different than one. - -## Usage - -Pass a body to function `Expand` to obtain a new body that will, on access -to its content, evaluate and expand any nested `dynamic` blocks. -Dynamic block processing is also automatically propagated into any nested -blocks that are returned, allowing users to nest dynamic blocks inside -one another and to nest dynamic blocks inside other static blocks. - -HCL structural decoding does not normally have access to an `EvalContext`, so -any variables and functions that should be available to the `for_each` -and `labels` expressions must be passed in when calling `Expand`. Expressions -within the `content` block are evaluated separately and so can be passed a -separate `EvalContext` if desired, during normal attribute expression -evaluation. - -## Detecting Variables - -Some applications dynamically generate an `EvalContext` by analyzing which -variables are referenced by an expression before evaluating it. - -This unfortunately requires some extra effort when this analysis is required -for the context passed to `Expand`: the HCL API requires a schema to be -provided in order to do any analysis of the blocks in a body, but the low-level -schema model provides a description of only one level of nested blocks at -a time, and thus a new schema must be provided for each additional level of -nesting. - -To make this arduous process as convenient as possible, this package provides -a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode` -instance that can be used to find variables directly in a given body and also -determine which nested blocks require recursive calls. Using this mechanism -requires that the caller be able to look up a schema given a nested block type. -For _simple_ formats where a specific block type name always has the same schema -regardless of context, a walk can be implemented as follows: - -```go -func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal { - vars, children := node.Visit(schema) - - for _, child := range children { - var childSchema *hcl.BodySchema - switch child.BlockTypeName { - case "a": - childSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "b", - LabelNames: []string{"key"}, - }, - }, - } - case "b": - childSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "val", - Required: true, - }, - }, - } - default: - // Should never happen, because the above cases should be exhaustive - // for the application's configuration format. - panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName)) - } - - vars = append(vars, testWalkAndAccumVars(child.Node, childSchema)...) - } -} -``` - -### Detecting Variables with `hcldec` Specifications - -For applications that use the higher-level `hcldec` package to decode nested -configuration structures into `cty` values, the same specification can be used -to automatically drive the recursive variable-detection walk described above. - -The helper function `ForEachVariablesHCLDec` allows an entire recursive -configuration structure to be analyzed in a single call given a `hcldec.Spec` -that describes the nested block structure. 
This means a `hcldec`-based -application can support dynamic blocks with only a little additional effort: - -```go -func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) { - // Determine which variables are needed to expand dynamic blocks - neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec) - - // Build a suitable EvalContext and expand dynamic blocks - dynCtx := buildEvalContext(neededForDynamic) - dynBody := dynblock.Expand(body, dynCtx) - - // Determine which variables are needed to fully decode the expanded body - // This will analyze expressions that came both from static blocks in the - // original body and from blocks that were dynamically added by Expand. - neededForDecode := hcldec.Variables(dynBody, spec) - - // Build a suitable EvalContext and then fully decode the body as per the - // hcldec specification. - decCtx := buildEvalContext(neededForDecode) - return hcldec.Decode(dynBody, spec, decCtx) -} - -func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext { - // (to be implemented by your application) -} -``` - -# Performance - -This extension is going quite harshly against the grain of the HCL API, and -so it uses lots of wrapping objects and temporary data structures to get its -work done. HCL in general is not suitable for use in high-performance situations -or situations sensitive to memory pressure, but that is _especially_ true for -this extension. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go deleted file mode 100644 index 65a9eab2..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go +++ /dev/null @@ -1,262 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// expandBody wraps another hcl.Body and expands any "dynamic" blocks found -// inside whenever Content or PartialContent is called. -type expandBody struct { - original hcl.Body - forEachCtx *hcl.EvalContext - iteration *iteration // non-nil if we're nested inside another "dynamic" block - - // These are used with PartialContent to produce a "remaining items" - // body to return. They are nil on all bodies fresh out of the transformer. - // - // Note that this is re-implemented here rather than delegating to the - // existing support required by the underlying body because we need to - // retain access to the entire original body on subsequent decode operations - // so we can retain any "dynamic" blocks for types we didn't take consume - // on the first pass. - hiddenAttrs map[string]struct{} - hiddenBlocks map[string]hcl.BlockHeaderSchema -} - -func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, diags := b.original.Content(extSchema) - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false) - diags = append(diags, blockDiags...) 
- attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - return content, diags -} - -func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - extSchema := b.extendSchema(schema) - rawContent, _, diags := b.original.PartialContent(extSchema) - // We discard the "remain" argument above because we're going to construct - // our own remain that also takes into account remaining "dynamic" blocks. - - blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) - diags = append(diags, blockDiags...) - attrs := b.prepareAttributes(rawContent.Attributes) - - content := &hcl.BodyContent{ - Attributes: attrs, - Blocks: blocks, - MissingItemRange: b.original.MissingItemRange(), - } - - remain := &expandBody{ - original: b.original, - forEachCtx: b.forEachCtx, - iteration: b.iteration, - hiddenAttrs: make(map[string]struct{}), - hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), - } - for name := range b.hiddenAttrs { - remain.hiddenAttrs[name] = struct{}{} - } - for typeName, blockS := range b.hiddenBlocks { - remain.hiddenBlocks[typeName] = blockS - } - for _, attrS := range schema.Attributes { - remain.hiddenAttrs[attrS.Name] = struct{}{} - } - for _, blockS := range schema.Blocks { - remain.hiddenBlocks[blockS.Type] = blockS - } - - return content, remain, diags -} - -func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - // If we have any hiddenBlocks then we also need to register those here - // so that a call to "Content" on the underlying body won't fail. - // (We'll filter these out again once we process the result of either - // Content or PartialContent.) - for _, blockS := range b.hiddenBlocks { - extSchema.Blocks = append(extSchema.Blocks, blockS) - } - - // If we have any hiddenAttrs then we also need to register these, for - // the same reason as we deal with hiddenBlocks above. - if len(b.hiddenAttrs) != 0 { - newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) - copy(newAttrs, extSchema.Attributes) - for name := range b.hiddenAttrs { - newAttrs = append(newAttrs, hcl.AttributeSchema{ - Name: name, - Required: false, - }) - } - extSchema.Attributes = newAttrs - } - - return extSchema -} - -func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { - if len(b.hiddenAttrs) == 0 && b.iteration == nil { - // Easy path: just pass through the attrs from the original body verbatim - return rawAttrs - } - - // Otherwise we have some work to do: we must filter out any attributes - // that are hidden (since a previous PartialContent call already saw these) - // and wrap the expressions of the inner attributes so that they will - // have access to our iteration variables. 
- attrs := make(hcl.Attributes, len(rawAttrs)) - for name, rawAttr := range rawAttrs { - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - if b.iteration != nil { - attr := *rawAttr // shallow copy so we can mutate it - attr.Expr = exprWrap{ - Expression: attr.Expr, - i: b.iteration, - } - attrs[name] = &attr - } else { - // If we have no active iteration then no wrapping is required. - attrs[name] = rawAttr - } - } - return attrs -} - -func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { - var blocks hcl.Blocks - var diags hcl.Diagnostics - - for _, rawBlock := range rawBlocks { - switch rawBlock.Type { - case "dynamic": - realBlockType := rawBlock.Labels[0] - if _, hidden := b.hiddenBlocks[realBlockType]; hidden { - continue - } - - var blockS *hcl.BlockHeaderSchema - for _, candidate := range schema.Blocks { - if candidate.Type == realBlockType { - blockS = &candidate - break - } - } - if blockS == nil { - // Not a block type that the caller requested. - if !partial { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported block type", - Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), - Subject: &rawBlock.LabelRanges[0], - }) - } - continue - } - - spec, specDiags := b.decodeSpec(blockS, rawBlock) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - continue - } - - if spec.forEachVal.IsKnown() { - for it := spec.forEachVal.ElementIterator(); it.Next(); { - key, value := it.Element() - i := b.iteration.MakeChild(spec.iteratorName, key, value) - - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) - if block != nil { - // Attach our new iteration context so that attributes - // and other nested blocks can refer to our iterator. - block.Body = b.expandChild(block.Body, i) - blocks = append(blocks, block) - } - } - } else { - // If our top-level iteration value isn't known then we're forced - // to compromise since HCL doesn't have any concept of an - // "unknown block". In this case then, we'll produce a single - // dynamic block with the iterator values set to DynamicVal, - // which at least makes the potential for a block visible - // in our result, even though it's not represented in a fully-accurate - // way. - i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) - block, blockDiags := spec.newBlock(i, b.forEachCtx) - diags = append(diags, blockDiags...) - if block != nil { - block.Body = b.expandChild(block.Body, i) - - // We additionally force all of the leaf attribute values - // in the result to be unknown so the calling application - // can, if necessary, use that as a heuristic to detect - // when a single nested block might be standing in for - // multiple blocks yet to be expanded. This retains the - // structure of the generated body but forces all of its - // leaf attribute values to be unknown. - block.Body = unknownBody{block.Body} - - blocks = append(blocks, block) - } - } - - default: - if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { - // A static block doesn't create a new iteration context, but - // it does need to inherit _our own_ iteration context in - // case it contains expressions that refer to our inherited - // iterators, or nested "dynamic" blocks. 
- expandedBlock := *rawBlock // shallow copy - expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) - blocks = append(blocks, &expandedBlock) - } - } - } - - return blocks, diags -} - -func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { - chiCtx := i.EvalContext(b.forEachCtx) - ret := Expand(child, chiCtx) - ret.(*expandBody).iteration = i - return ret -} - -func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - // blocks aren't allowed in JustAttributes mode and this body can - // only produce blocks, so we'll just pass straight through to our - // underlying body here. - return b.original.JustAttributes() -} - -func (b *expandBody) MissingItemRange() hcl.Range { - return b.original.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go deleted file mode 100644 index 98a51ead..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go +++ /dev/null @@ -1,215 +0,0 @@ -package dynblock - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -type expandSpec struct { - blockType string - blockTypeRange hcl.Range - defRange hcl.Range - forEachVal cty.Value - iteratorName string - labelExprs []hcl.Expression - contentBody hcl.Body - inherited map[string]*iteration -} - -func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var schema *hcl.BodySchema - if len(blockS.LabelNames) != 0 { - schema = dynamicBlockBodySchemaLabels - } else { - schema = dynamicBlockBodySchemaNoLabels - } - - specContent, specDiags := rawSpec.Body.Content(schema) - diags = append(diags, specDiags...) - if specDiags.HasErrors() { - return nil, diags - } - - //// for_each attribute - - eachAttr := specContent.Attributes["for_each"] - eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) - diags = append(diags, eachDiags...) - - if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { - // We skip this error for DynamicPseudoType because that means we either - // have a null (which is checked immediately below) or an unknown - // (which is handled in the expandBody Content methods). - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()), - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - if eachVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic for_each value", - Detail: "Cannot use a null value in for_each.", - Subject: eachAttr.Expr.Range().Ptr(), - Expression: eachAttr.Expr, - EvalContext: b.forEachCtx, - }) - return nil, diags - } - - //// iterator attribute - - iteratorName := blockS.Type - if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { - itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) - diags = append(diags, itDiags...) 
- if itDiags.HasErrors() { - return nil, diags - } - - if len(itTraversal) != 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic iterator name", - Detail: "Dynamic iterator must be a single variable name.", - Subject: itTraversal.SourceRange().Ptr(), - }) - return nil, diags - } - - iteratorName = itTraversal.RootName() - } - - var labelExprs []hcl.Expression - if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { - var labelDiags hcl.Diagnostics - labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) - diags = append(diags, labelDiags...) - if labelDiags.HasErrors() { - return nil, diags - } - - if len(labelExprs) > len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic block label", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), - }) - return nil, diags - } else if len(labelExprs) < len(blockS.LabelNames) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Insufficient dynamic block labels", - Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), - Subject: labelsAttr.Expr.Range().Ptr(), - }) - return nil, diags - } - } - - // Since our schema requests only blocks of type "content", we can assume - // that all entries in specContent.Blocks are content blocks. - if len(specContent.Blocks) == 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing dynamic content block", - Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", - Subject: &specContent.MissingItemRange, - }) - return nil, diags - } - if len(specContent.Blocks) > 1 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous dynamic content block", - Detail: "Only one nested content block is allowed for each dynamic block.", - Subject: &specContent.Blocks[1].DefRange, - }) - return nil, diags - } - - return &expandSpec{ - blockType: blockS.Type, - blockTypeRange: rawSpec.LabelRanges[0], - defRange: rawSpec.DefRange, - forEachVal: eachVal, - iteratorName: iteratorName, - labelExprs: labelExprs, - contentBody: specContent.Blocks[0].Body, - }, diags -} - -func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { - var diags hcl.Diagnostics - var labels []string - var labelRanges []hcl.Range - lCtx := i.EvalContext(ctx) - for _, labelExpr := range s.labelExprs { - labelVal, labelDiags := labelExpr.Value(lCtx) - diags = append(diags, labelDiags...) 
- if labelDiags.HasErrors() { - return nil, diags - } - - var convErr error - labelVal, convErr = convert.Convert(labelVal, cty.String) - if convErr != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if labelVal.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "Cannot use a null value as a dynamic block label.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - if !labelVal.IsKnown() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid dynamic block label", - Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", - Subject: labelExpr.Range().Ptr(), - Expression: labelExpr, - EvalContext: lCtx, - }) - return nil, diags - } - - labels = append(labels, labelVal.AsString()) - labelRanges = append(labelRanges, labelExpr.Range()) - } - - block := &hcl.Block{ - Type: s.blockType, - TypeRange: s.blockTypeRange, - Labels: labels, - LabelRanges: labelRanges, - DefRange: s.defRange, - Body: s.contentBody, - } - - return block, diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go deleted file mode 100644 index 460a1d2a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go +++ /dev/null @@ -1,42 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type exprWrap struct { - hcl.Expression - i *iteration -} - -func (e exprWrap) Variables() []hcl.Traversal { - raw := e.Expression.Variables() - ret := make([]hcl.Traversal, 0, len(raw)) - - // Filter out traversals that refer to our iterator name or any - // iterator we've inherited; we're going to provide those in - // our Value wrapper, so the caller doesn't need to know about them. - for _, traversal := range raw { - rootName := traversal.RootName() - if rootName == e.i.IteratorName { - continue - } - if _, inherited := e.i.Inherited[rootName]; inherited { - continue - } - ret = append(ret, traversal) - } - return ret -} - -func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - extCtx := e.i.EvalContext(ctx) - return e.Expression.Value(extCtx) -} - -// UnwrapExpression returns the expression being wrapped by this instance. -// This allows the original expression to be recovered by hcl.UnwrapExpression. 
-func (e exprWrap) UnwrapExpression() hcl.Expression { - return e.Expression -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go deleted file mode 100644 index c5663886..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go +++ /dev/null @@ -1,66 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -type iteration struct { - IteratorName string - Key cty.Value - Value cty.Value - Inherited map[string]*iteration -} - -func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { - return &iteration{ - IteratorName: s.iteratorName, - Key: key, - Value: value, - Inherited: s.inherited, - } -} - -func (i *iteration) Object() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "key": i.Key, - "value": i.Value, - }) -} - -func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { - new := base.NewChild() - - if i != nil { - new.Variables = map[string]cty.Value{} - for name, otherIt := range i.Inherited { - new.Variables[name] = otherIt.Object() - } - new.Variables[i.IteratorName] = i.Object() - } - - return new -} - -func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { - if i == nil { - // Create entirely new root iteration, then - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - } - } - - inherited := map[string]*iteration{} - for name, otherIt := range i.Inherited { - inherited[name] = otherIt - } - inherited[i.IteratorName] = i - return &iteration{ - IteratorName: iteratorName, - Key: key, - Value: value, - Inherited: inherited, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go deleted file mode 100644 index a5bfd94e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go +++ /dev/null @@ -1,47 +0,0 @@ -// Package dynblock provides an extension to HCL that allows dynamic -// declaration of nested blocks in certain contexts via a special block type -// named "dynamic". -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Expand "dynamic" blocks in the given body, returning a new body that -// has those blocks expanded. -// -// The given EvalContext is used when evaluating "for_each" and "labels" -// attributes within dynamic blocks, allowing those expressions access to -// variables and functions beyond the iterator variable created by the -// iteration. -// -// Expand returns no diagnostics because no blocks are actually expanded -// until a call to Content or PartialContent on the returned body, which -// will then expand only the blocks selected by the schema. -// -// "dynamic" blocks are also expanded automatically within nested blocks -// in the given body, including within other dynamic blocks, thus allowing -// multi-dimensional iteration. However, it is not possible to -// dynamically-generate the "dynamic" blocks themselves except through nesting. 
-// -// parent { -// dynamic "child" { -// for_each = child_objs -// content { -// dynamic "grandchild" { -// for_each = child.value.children -// labels = [grandchild.key] -// content { -// parent_key = child.key -// value = grandchild.value -// } -// } -// } -// } -// } -func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body { - return &expandBody{ - original: body, - forEachCtx: ctx, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go deleted file mode 100644 index b3907d6e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go +++ /dev/null @@ -1,50 +0,0 @@ -package dynblock - -import "github.com/hashicorp/hcl/v2" - -var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, -} - -var dynamicBlockBodySchemaLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - { - Name: "labels", - Required: true, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} - -var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: true, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - LabelNames: nil, - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go deleted file mode 100644 index ce98259a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go +++ /dev/null @@ -1,84 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// unknownBody is a funny body that just reports everything inside it as -// unknown. It uses a given other body as a sort of template for what attributes -// and blocks are inside -- including source location information -- but -// subsitutes unknown values of unknown type for all attributes. -// -// This rather odd process is used to handle expansion of dynamic blocks whose -// for_each expression is unknown. Since a block cannot itself be unknown, -// we instead arrange for everything _inside_ the block to be unknown instead, -// to give the best possible approximation. -type unknownBody struct { - template hcl.Body -} - -var _ hcl.Body = unknownBody{} - -func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, diags := b.template.Content(schema) - content = b.fixupContent(content) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return content, diags -} - -func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - content, remain, diags := b.template.PartialContent(schema) - content = b.fixupContent(content) - remain = unknownBody{remain} // remaining content must also be wrapped - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. 
- return content, remain, diags -} - -func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - attrs, diags := b.template.JustAttributes() - attrs = b.fixupAttrs(attrs) - - // We're intentionally preserving the diagnostics reported from the - // inner body so that we can still report where the template body doesn't - // match the requested schema. - return attrs, diags -} - -func (b unknownBody) MissingItemRange() hcl.Range { - return b.template.MissingItemRange() -} - -func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { - ret := &hcl.BodyContent{} - ret.Attributes = b.fixupAttrs(got.Attributes) - if len(got.Blocks) > 0 { - ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) - for _, gotBlock := range got.Blocks { - new := *gotBlock // shallow copy - new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown - ret.Blocks = append(ret.Blocks, &new) - } - } - - return ret -} - -func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { - if len(got) == 0 { - return nil - } - ret := make(hcl.Attributes, len(got)) - for name, gotAttr := range got { - new := *gotAttr // shallow copy - new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) - ret[name] = &new - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go deleted file mode 100644 index 19233929..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go +++ /dev/null @@ -1,209 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// WalkVariables begins the recursive process of walking all expressions and -// nested blocks in the given body and its child bodies while taking into -// account any "dynamic" blocks. -// -// This function requires that the caller walk through the nested block -// structure in the given body level-by-level so that an appropriate schema -// can be provided at each level to inform further processing. This workflow -// is thus easiest to use for calling applications that have some higher-level -// schema representation available with which to drive this multi-step -// process. If your application uses the hcldec package, you may be able to -// use VariablesHCLDec instead for a more automatic approach. -func WalkVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - includeContent: true, - } -} - -// WalkExpandVariables is like Variables but it includes only the variables -// required for successful block expansion, ignoring any variables referenced -// inside block contents. The result is the minimal set of all variables -// required for a call to Expand, excluding variables that would only be -// needed to subsequently call Content or PartialContent on the expanded -// body. -func WalkExpandVariables(body hcl.Body) WalkVariablesNode { - return WalkVariablesNode{ - body: body, - } -} - -type WalkVariablesNode struct { - body hcl.Body - it *iteration - - includeContent bool -} - -type WalkVariablesChild struct { - BlockTypeName string - Node WalkVariablesNode -} - -// Body returns the HCL Body associated with the child node, in case the caller -// wants to do some sort of inspection of it in order to decide what schema -// to pass to Visit. -// -// Most implementations should just fetch a fixed schema based on the -// BlockTypeName field and not access this. 
Deciding on a schema dynamically -// based on the body is a strange thing to do and generally necessary only if -// your caller is already doing other bizarre things with HCL bodies. -func (c WalkVariablesChild) Body() hcl.Body { - return c.Node.body -} - -// Visit returns the variable traversals required for any "dynamic" blocks -// directly in the body associated with this node, and also returns any child -// nodes that must be visited in order to continue the walk. -// -// Each child node has its associated block type name given in its BlockTypeName -// field, which the calling application should use to determine the appropriate -// schema for the content of each child node and pass it to the child node's -// own Visit method to continue the walk recursively. -func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { - extSchema := n.extendSchema(schema) - container, _, _ := n.body.PartialContent(extSchema) - if container == nil { - return vars, children - } - - children = make([]WalkVariablesChild, 0, len(container.Blocks)) - - if n.includeContent { - for _, attr := range container.Attributes { - for _, traversal := range attr.Expr.Variables() { - var ours, inherited bool - if n.it != nil { - ours = traversal.RootName() == n.it.IteratorName - _, inherited = n.it.Inherited[traversal.RootName()] - } - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - } - - for _, block := range container.Blocks { - switch block.Type { - - case "dynamic": - blockTypeName := block.Labels[0] - inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) - if inner == nil { - continue - } - - iteratorName := blockTypeName - if attr, exists := inner.Attributes["iterator"]; exists { - iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) - if len(iterTraversal) == 0 { - // Ignore this invalid dynamic block, since it'll produce - // an error if someone tries to extract content from it - // later anyway. - continue - } - iteratorName = iterTraversal.RootName() - } - blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) - - if attr, exists := inner.Attributes["for_each"]; exists { - // Filter out iterator names inherited from parent blocks - for _, traversal := range attr.Expr.Variables() { - if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { - vars = append(vars, traversal) - } - } - } - if attr, exists := inner.Attributes["labels"]; exists { - // Filter out both our own iterator name _and_ those inherited - // from parent blocks, since we provide _both_ of these to the - // label expressions. - for _, traversal := range attr.Expr.Variables() { - ours := traversal.RootName() == iteratorName - _, inherited := blockIt.Inherited[traversal.RootName()] - - if !(ours || inherited) { - vars = append(vars, traversal) - } - } - } - - for _, contentBlock := range inner.Blocks { - // We only request "content" blocks in our schema, so we know - // any blocks we find here will be content blocks. We require - // exactly one content block for actual expansion, but we'll - // be more liberal here so that callers can still collect - // variables from erroneous "dynamic" blocks. 
- children = append(children, WalkVariablesChild{ - BlockTypeName: blockTypeName, - Node: WalkVariablesNode{ - body: contentBlock.Body, - it: blockIt, - includeContent: n.includeContent, - }, - }) - } - - default: - children = append(children, WalkVariablesChild{ - BlockTypeName: block.Type, - Node: WalkVariablesNode{ - body: block.Body, - it: n.it, - includeContent: n.includeContent, - }, - }) - - } - } - - return vars, children -} - -func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { - // We augment the requested schema to also include our special "dynamic" - // block type, since then we'll get instances of it interleaved with - // all of the literal child blocks we must also include. - extSchema := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - copy(extSchema.Blocks, schema.Blocks) - extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) - - return extSchema -} - -// This is a more relaxed schema than what's in schema.go, since we -// want to maximize the amount of variables we can find even if there -// are erroneous blocks. -var variableDetectionInnerSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "for_each", - Required: false, - }, - { - Name: "labels", - Required: false, - }, - { - Name: "iterator", - Required: false, - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "content", - }, - }, -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go deleted file mode 100644 index 907ef3eb..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go +++ /dev/null @@ -1,43 +0,0 @@ -package dynblock - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" -) - -// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec -// specification to automatically drive the recursive walk through nested -// blocks in the given body. -// -// This is a drop-in replacement for hcldec.Variables which is able to treat -// blocks of type "dynamic" in the same special way that dynblock.Expand would, -// exposing both the variables referenced in the "for_each" and "labels" -// arguments and variables used in the nested "content" block. -func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the -// minimal set of variables required to call Expand, ignoring variables that -// are referenced only inside normal block contents. See WalkExpandVariables -// for more information. -func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { - rootNode := WalkExpandVariables(body) - return walkVariablesWithHCLDec(rootNode, spec) -} - -func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { - vars, children := node.Visit(hcldec.ImpliedSchema(spec)) - - if len(children) > 0 { - childSpecs := hcldec.ChildBlockTypes(spec) - for _, child := range children { - if childSpec, exists := childSpecs[child.BlockTypeName]; exists { - vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
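Editor's aside, not part of the upstream diff: the `VariablesHCLDec` helper in the file removed above is documented as a drop-in replacement for `hcldec.Variables` that understands `dynamic` blocks. The following is a minimal, hypothetical sketch of how a caller might use it, assuming the upstream `github.com/hashicorp/hcl/v2` import paths; the spec, file name, and configuration text are invented for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/dynblock"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`
dynamic "setting" {
  for_each = var.settings
  content {
    value = setting.value
  }
}
`)
	f, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// A spec describing zero or more "setting" blocks, each with a single
	// required "value" attribute.
	spec := &hcldec.BlockListSpec{
		TypeName: "setting",
		Nested: &hcldec.AttrSpec{
			Name:     "value",
			Type:     cty.String,
			Required: true,
		},
	}

	// Collect every variable the body needs, including those referenced only
	// inside the dynamic block's for_each and content arguments. The iterator
	// symbol ("setting" here) is filtered out automatically.
	for _, traversal := range dynblock.VariablesHCLDec(f.Body, spec) {
		fmt.Println(traversal.RootName()) // prints "var" for this input
	}
}
```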
- } - } - } - - return vars -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md deleted file mode 100644 index 5d56eeca..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# "Try" and "can" functions - -This Go package contains two `cty` functions intended for use in an -`hcl.EvalContext` when evaluating HCL native syntax expressions. - -The first function `try` attempts to evaluate each of its argument expressions -in order until one produces a result without any errors. - -```hcl -try(non_existent_variable, 2) # returns 2 -``` - -If none of the expressions succeed, the function call fails with all of the -errors it encountered. - -The second function `can` is similar except that it ignores the result of -the given expression altogether and simply returns `true` if the expression -produced a successful result or `false` if it produced errors. - -Both of these are primarily intended for working with deep data structures -which might not have a dependable shape. For example, we can use `try` to -attempt to fetch a value from deep inside a data structure but produce a -default value if any step of the traversal fails: - -```hcl -result = try(foo.deep[0].lots.of["traversals"], null) -``` - -The final result to `try` should generally be some sort of constant value that -will always evaluate successfully. - -## Using these functions - -Languages built on HCL can make `try` and `can` available to user code by -exporting them in the `hcl.EvalContext` used for expression evaluation: - -```go -ctx := &hcl.EvalContext{ - Functions: map[string]function.Function{ - "try": tryfunc.TryFunc, - "can": tryfunc.CanFunc, - }, -} -``` diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go deleted file mode 100644 index 2f4862f4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go +++ /dev/null @@ -1,150 +0,0 @@ -// Package tryfunc contains some optional functions that can be exposed in -// HCL-based languages to allow authors to test whether a particular expression -// can succeed and take dynamic action based on that result. -// -// These functions are implemented in terms of the customdecode extension from -// the sibling directory "customdecode", and so they are only useful when -// used within an HCL EvalContext. Other systems using cty functions are -// unlikely to support the HCL-specific "customdecode" extension. -package tryfunc - -import ( - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// TryFunc is a variadic function that tries to evaluate all of is arguments -// in sequence until one succeeds, in which case it returns that result, or -// returns an error if none of them succeed. -var TryFunc function.Function - -// CanFunc tries to evaluate the expression given in its first argument. 
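Editor's aside, not part of the upstream diff: the tryfunc README removed above shows how a host application exposes `try` and `can` through an `hcl.EvalContext`. Below is a small, self-contained sketch against the upstream `github.com/hashicorp/hcl/v2` packages; the expression text and file name are made up for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/tryfunc"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	// "settings" is not defined in the evaluation context, so try() should
	// fall back to the second argument.
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`try(settings.region, "us-east-1")`),
		"example.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	ctx := &hcl.EvalContext{
		Functions: map[string]function.Function{
			"try": tryfunc.TryFunc,
			"can": tryfunc.CanFunc,
		},
	}

	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(val.AsString()) // "us-east-1"
}
```

The same context could evaluate `can(settings.region)`, which would yield `cty.False` here instead of surfacing the error.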
-var CanFunc function.Function - -func init() { - TryFunc = function.New(&function.Spec{ - VarParam: &function.Parameter{ - Name: "expressions", - Type: customdecode.ExpressionClosureType, - }, - Type: func(args []cty.Value) (cty.Type, error) { - v, err := try(args) - if err != nil { - return cty.NilType, err - } - return v.Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return try(args) - }, - }) - CanFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "expression", - Type: customdecode.ExpressionClosureType, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return can(args[0]) - }, - }) -} - -func try(args []cty.Value) (cty.Value, error) { - if len(args) == 0 { - return cty.NilVal, errors.New("at least one argument is required") - } - - // We'll collect up all of the diagnostics we encounter along the way - // and report them all if none of the expressions succeed, so that the - // user might get some hints on how to make at least one succeed. - var diags hcl.Diagnostics - for _, arg := range args { - closure := customdecode.ExpressionClosureFromVal(arg) - if dependsOnUnknowns(closure.Expression, closure.EvalContext) { - // We can't safely decide if this expression will succeed yet, - // and so our entire result must be unknown until we have - // more information. - return cty.DynamicVal, nil - } - - v, moreDiags := closure.Value() - diags = append(diags, moreDiags...) - if moreDiags.HasErrors() { - continue // try the next one, if there is one to try - } - return v, nil // ignore any accumulated diagnostics if one succeeds - } - - // If we fall out here then none of the expressions succeeded, and so - // we must have at least one diagnostic and we'll return all of them - // so that the user can see the errors related to whichever one they - // were expecting to have succeeded in this case. - // - // Because our function must return a single error value rather than - // diagnostics, we'll construct a suitable error message string - // that will make sense in the context of the function call failure - // diagnostic HCL will eventually wrap this in. - var buf strings.Builder - buf.WriteString("no expression succeeded:\n") - for _, diag := range diags { - if diag.Subject != nil { - buf.WriteString(fmt.Sprintf("- %s (at %s)\n %s\n", diag.Summary, diag.Subject, diag.Detail)) - } else { - buf.WriteString(fmt.Sprintf("- %s\n %s\n", diag.Summary, diag.Detail)) - } - } - buf.WriteString("\nAt least one expression must produce a successful result") - return cty.NilVal, errors.New(buf.String()) -} - -func can(arg cty.Value) (cty.Value, error) { - closure := customdecode.ExpressionClosureFromVal(arg) - if dependsOnUnknowns(closure.Expression, closure.EvalContext) { - // Can't decide yet, then. - return cty.UnknownVal(cty.Bool), nil - } - - _, diags := closure.Value() - if diags.HasErrors() { - return cty.False, nil - } - return cty.True, nil -} - -// dependsOnUnknowns returns true if any of the variables that the given -// expression might access are unknown values or contain unknown values. -// -// This is a conservative result that prefers to return true if there's any -// chance that the expression might derive from an unknown value during its -// evaluation; it is likely to produce false-positives for more complex -// expressions involving deep data structures. 
-func dependsOnUnknowns(expr hcl.Expression, ctx *hcl.EvalContext) bool { - for _, traversal := range expr.Variables() { - val, diags := traversal.TraverseAbs(ctx) - if diags.HasErrors() { - // If the traversal returned a definitive error then it must - // not traverse through any unknowns. - continue - } - if !val.IsWhollyKnown() { - // The value will be unknown if either it refers directly to - // an unknown value or if the traversal moves through an unknown - // collection. We're using IsWhollyKnown, so this also catches - // situations where the traversal refers to a compound data - // structure that contains any unknown values. That's important, - // because during evaluation the expression might evaluate more - // deeply into this structure and encounter the unknowns. - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md deleted file mode 100644 index 058f1e3d..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md +++ /dev/null @@ -1,135 +0,0 @@ -# HCL Type Expressions Extension - -This HCL extension defines a convention for describing HCL types using function -call and variable reference syntax, allowing configuration formats to include -type information provided by users. - -The type syntax is processed statically from a hcl.Expression, so it cannot -use any of the usual language operators. This is similar to type expressions -in statically-typed programming languages. - -```hcl -variable "example" { - type = list(string) -} -``` - -The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall` -functions, and so it relies on the underlying syntax to define how "keyword" -and "call" are interpreted. The above shows how they are interpreted in -the HCL native syntax, while the following shows the same information -expressed in JSON: - -```json -{ - "variable": { - "example": { - "type": "list(string)" - } - } -} -``` - -Notice that since we have additional contextual information that we intend -to allow only calls and keywords the JSON syntax is able to parse the given -string directly as an expression, rather than as a template as would be -the case for normal expression evaluation. - -For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr). - -## Type Expression Syntax - -When expressed in the native syntax, the following expressions are permitted -in a type expression: - -* `string` - string -* `bool` - boolean -* `number` - number -* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only) -* `list()` - list of the type given as an argument -* `set()` - set of the type given as an argument -* `map()` - map of the type given as an argument -* `tuple([])` - tuple with the element types given in the single list argument -* `object({=, ...}` - object with the attributes and corresponding types given in the single map argument - -For example: - -* `list(string)` -* `object({name=string,age=number})` -* `map(object({name=string,age=number}))` - -Note that the object constructor syntax is not fully-general for all possible -object types because it requires the attribute names to be valid identifiers. 
-In practice it is expected that any time an object type is being fixed for -type checking it will be one that has identifiers as its attributes; object -types with weird attributes generally show up only from arbitrary object -constructors in configuration files, which are usually treated either as maps -or as the dynamic pseudo-type. - -## Type Constraints as Values - -Along with defining a convention for writing down types using HCL expression -constructs, this package also includes a mechanism for representing types as -values that can be used as data within an HCL-based language. - -`typeexpr.TypeConstraintType` is a -[`cty` capsule type](https://github.com/zclconf/go-cty/blob/master/docs/types.md#capsule-types) -that encapsulates `cty.Type` values. You can construct such a value directly -using the `TypeConstraintVal` function: - -```go -tyVal := typeexpr.TypeConstraintVal(cty.String) - -// We can unpack the type from a value using TypeConstraintFromVal -ty := typeExpr.TypeConstraintFromVal(tyVal) -``` - -However, the primary purpose of `typeexpr.TypeConstraintType` is to be -specified as the type constraint for an argument, in which case it serves -as a signal for HCL to treat the argument expression as a type constraint -expression as defined above, rather than as a normal value expression. - -"An argument" in the above in practice means the following two locations: - -* As the type constraint for a parameter of a cty function that will be - used in an `hcl.EvalContext`. In that case, function calls in the HCL - native expression syntax will require the argument to be valid type constraint - expression syntax and the function implementation will receive a - `TypeConstraintType` value as the argument value for that parameter. - -* As the type constraint for a `hcldec.AttrSpec` or `hcldec.BlockAttrsSpec` - when decoding an HCL body using `hcldec`. In that case, the attributes - with that type constraint will be required to be valid type constraint - expression syntax and the result will be a `TypeConstraintType` value. - -Note that the special handling of these arguments means that an argument -marked in this way must use the type constraint syntax directly. It is not -valid to pass in a value of `TypeConstraintType` that has been obtained -dynamically via some other expression result. - -`TypeConstraintType` is provided with the intent of using it internally within -application code when incorporating type constraint expression syntax into -an HCL-based language, not to be used for dynamic "programming with types". A -calling application could support programming with types by defining its _own_ -capsule type, but that is not the purpose of `TypeConstraintType`. - -## The "convert" `cty` Function - -Building on the `TypeConstraintType` described in the previous section, this -package also provides `typeexpr.ConvertFunc` which is a cty function that -can be placed into a `cty.EvalContext` (conventionally named "convert") in -order to provide a general type conversion function in an HCL-based language: - -```hcl - foo = convert("true", bool) -``` - -The second parameter uses the mechanism described in the previous section to -require its argument to be a type constraint expression rather than a value -expression. In doing so, it allows converting with any type constraint that -can be expressed in this package's type constraint syntax. In the above example, -the `foo` argument would receive a boolean true, or `cty.True` in `cty` terms. 
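Editor's aside, not part of the upstream diff: the typeexpr README removed above defines the type-expression syntax. A brief sketch of parsing a type constraint with the upstream package follows; the file name and expression are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`object({name=string, age=number})`),
		"type.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// TypeConstraint also accepts the "any" keyword; Type would reject it.
	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	fmt.Println(ty.IsObjectType())       // true
	fmt.Println(typeexpr.TypeString(ty)) // object({age=number,name=string})
}
```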
- -The target type constraint must always be provided statically using inline -type constraint syntax. There is no way to _dynamically_ select a type -constraint using this function. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go deleted file mode 100644 index c4b37957..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package typeexpr extends HCL with a convention for describing HCL types -// within configuration files. -// -// The type syntax is processed statically from a hcl.Expression, so it cannot -// use any of the usual language operators. This is similar to type expressions -// in statically-typed programming languages. -// -// variable "example" { -// type = list(string) -// } -package typeexpr diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go deleted file mode 100644 index 11b06897..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go +++ /dev/null @@ -1,196 +0,0 @@ -package typeexpr - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -const invalidTypeSummary = "Invalid type specification" - -// getType is the internal implementation of both Type and TypeConstraint, -// using the passed flag to distinguish. When constraint is false, the "any" -// keyword will produce an error. -func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { - // First we'll try for one of our keywords - kw := hcl.ExprAsKeyword(expr) - switch kw { - case "bool": - return cty.Bool, nil - case "string": - return cty.String, nil - case "number": - return cty.Number, nil - case "any": - if constraint { - return cty.DynamicPseudoType, nil - } - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), - Subject: expr.Range().Ptr(), - }} - case "list", "map", "set": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), - Subject: expr.Range().Ptr(), - }} - case "object": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: expr.Range().Ptr(), - }} - case "tuple": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: expr.Range().Ptr(), - }} - case "": - // okay! we'll fall through and try processing as a call, then. - default: - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), - Subject: expr.Range().Ptr(), - }} - } - - // If we get down here then our expression isn't just a keyword, so we'll - // try to process it as a call instead. 
- call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", - Subject: expr.Range().Ptr(), - }} - } - - switch call.Name { - case "bool", "string", "number", "any": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), - Subject: &call.ArgsRange, - }} - } - - if len(call.Arguments) != 1 { - contextRange := call.ArgsRange - subjectRange := call.ArgsRange - if len(call.Arguments) > 1 { - // If we have too many arguments (as opposed to too _few_) then - // we'll highlight the extraneous arguments as the diagnostic - // subject. - subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) - } - - switch call.Name { - case "list", "set", "map": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), - Subject: &subjectRange, - Context: &contextRange, - }} - case "object": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", - Subject: &subjectRange, - Context: &contextRange, - }} - case "tuple": - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "The tuple type constructor requires one argument specifying the element types as a list.", - Subject: &subjectRange, - Context: &contextRange, - }} - } - } - - switch call.Name { - - case "list": - ety, diags := getType(call.Arguments[0], constraint) - return cty.List(ety), diags - case "set": - ety, diags := getType(call.Arguments[0], constraint) - return cty.Set(ety), diags - case "map": - ety, diags := getType(call.Arguments[0], constraint) - return cty.Map(ety), diags - case "object": - attrDefs, diags := hcl.ExprMap(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - - atys := make(map[string]cty.Type) - for _, attrDef := range attrDefs { - attrName := hcl.ExprAsKeyword(attrDef.Key) - if attrName == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Object constructor map keys must be attribute names.", - Subject: attrDef.Key.Range().Ptr(), - Context: expr.Range().Ptr(), - }) - continue - } - aty, attrDiags := getType(attrDef.Value, constraint) - diags = append(diags, attrDiags...) 
- atys[attrName] = aty - } - return cty.Object(atys), diags - case "tuple": - elemDefs, diags := hcl.ExprList(call.Arguments[0]) - if diags.HasErrors() { - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: "Tuple type constructor requires a list of element types.", - Subject: call.Arguments[0].Range().Ptr(), - Context: expr.Range().Ptr(), - }} - } - etys := make([]cty.Type, len(elemDefs)) - for i, defExpr := range elemDefs { - ety, elemDiags := getType(defExpr, constraint) - diags = append(diags, elemDiags...) - etys[i] = ety - } - return cty.Tuple(etys), diags - default: - // Can't access call.Arguments in this path because we've not validated - // that it contains exactly one expression here. - return cty.DynamicPseudoType, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: invalidTypeSummary, - Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name), - Subject: expr.Range().Ptr(), - }} - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go deleted file mode 100644 index 3b8f618f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go +++ /dev/null @@ -1,129 +0,0 @@ -package typeexpr - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Type attempts to process the given expression as a type expression and, if -// successful, returns the resulting type. If unsuccessful, error diagnostics -// are returned. -func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { - return getType(expr, false) -} - -// TypeConstraint attempts to parse the given expression as a type constraint -// and, if successful, returns the resulting type. If unsuccessful, error -// diagnostics are returned. -// -// A type constraint has the same structure as a type, but it additionally -// allows the keyword "any" to represent cty.DynamicPseudoType, which is often -// used as a wildcard in type checking and type conversion operations. -func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { - return getType(expr, true) -} - -// TypeString returns a string rendering of the given type as it would be -// expected to appear in the HCL native syntax. -// -// This is primarily intended for showing types to the user in an application -// that uses typexpr, where the user can be assumed to be familiar with the -// type expression syntax. In applications that do not use typeexpr these -// results may be confusing to the user and so type.FriendlyName may be -// preferable, even though it's less precise. -// -// TypeString produces reasonable results only for types like what would be -// produced by the Type and TypeConstraint functions. In particular, it cannot -// support capsule types. 
-func TypeString(ty cty.Type) string { - // Easy cases first - switch ty { - case cty.String: - return "string" - case cty.Bool: - return "bool" - case cty.Number: - return "number" - case cty.DynamicPseudoType: - return "any" - } - - if ty.IsCapsuleType() { - panic("TypeString does not support capsule types") - } - - if ty.IsCollectionType() { - ety := ty.ElementType() - etyString := TypeString(ety) - switch { - case ty.IsListType(): - return fmt.Sprintf("list(%s)", etyString) - case ty.IsSetType(): - return fmt.Sprintf("set(%s)", etyString) - case ty.IsMapType(): - return fmt.Sprintf("map(%s)", etyString) - default: - // Should never happen because the above is exhaustive - panic("unsupported collection type") - } - } - - if ty.IsObjectType() { - var buf bytes.Buffer - buf.WriteString("object({") - atys := ty.AttributeTypes() - names := make([]string, 0, len(atys)) - for name := range atys { - names = append(names, name) - } - sort.Strings(names) - first := true - for _, name := range names { - aty := atys[name] - if !first { - buf.WriteByte(',') - } - if !hclsyntax.ValidIdentifier(name) { - // Should never happen for any type produced by this package, - // but we'll do something reasonable here just so we don't - // produce garbage if someone gives us a hand-assembled object - // type that has weird attribute names. - // Using Go-style quoting here isn't perfect, since it doesn't - // exactly match HCL syntax, but it's fine for an edge-case. - buf.WriteString(fmt.Sprintf("%q", name)) - } else { - buf.WriteString(name) - } - buf.WriteByte('=') - buf.WriteString(TypeString(aty)) - first = false - } - buf.WriteString("})") - return buf.String() - } - - if ty.IsTupleType() { - var buf bytes.Buffer - buf.WriteString("tuple([") - etys := ty.TupleElementTypes() - first := true - for _, ety := range etys { - if !first { - buf.WriteByte(',') - } - buf.WriteString(TypeString(ety)) - first = false - } - buf.WriteString("])") - return buf.String() - } - - // Should never happen because we covered all cases above. - panic(fmt.Errorf("unsupported type %#v", ty)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go deleted file mode 100644 index 5462d82c..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go +++ /dev/null @@ -1,118 +0,0 @@ -package typeexpr - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// TypeConstraintType is a cty capsule type that allows cty type constraints to -// be used as values. -// -// If TypeConstraintType is used in a context supporting the -// customdecode.CustomExpressionDecoder extension then it will implement -// expression decoding using the TypeConstraint function, thus allowing -// type expressions to be used in contexts where value expressions might -// normally be expected, such as in arguments to function calls. -var TypeConstraintType cty.Type - -// TypeConstraintVal constructs a cty.Value whose type is -// TypeConstraintType. -func TypeConstraintVal(ty cty.Type) cty.Value { - return cty.CapsuleVal(TypeConstraintType, &ty) -} - -// TypeConstraintFromVal extracts the type from a cty.Value of -// TypeConstraintType that was previously constructed using TypeConstraintVal. 
-// -// If the given value isn't a known, non-null value of TypeConstraintType -// then this function will panic. -func TypeConstraintFromVal(v cty.Value) cty.Type { - if !v.Type().Equals(TypeConstraintType) { - panic("value is not of TypeConstraintType") - } - ptr := v.EncapsulatedValue().(*cty.Type) - return *ptr -} - -// ConvertFunc is a cty function that implements type conversions. -// -// Its signature is as follows: -// convert(value, type_constraint) -// -// ...where type_constraint is a type constraint expression as defined by -// typeexpr.TypeConstraint. -// -// It relies on HCL's customdecode extension and so it's not suitable for use -// in non-HCL contexts or if you are using a HCL syntax implementation that -// does not support customdecode for function arguments. However, it _is_ -// supported for function calls in the HCL native expression syntax. -var ConvertFunc function.Function - -func init() { - TypeConstraintType = cty.CapsuleWithOps("type constraint", reflect.TypeOf(cty.Type{}), &cty.CapsuleOps{ - ExtensionData: func(key interface{}) interface{} { - switch key { - case customdecode.CustomExpressionDecoder: - return customdecode.CustomExpressionDecoderFunc( - func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - ty, diags := TypeConstraint(expr) - if diags.HasErrors() { - return cty.NilVal, diags - } - return TypeConstraintVal(ty), nil - }, - ) - default: - return nil - } - }, - TypeGoString: func(_ reflect.Type) string { - return "typeexpr.TypeConstraintType" - }, - GoString: func(raw interface{}) string { - tyPtr := raw.(*cty.Type) - return fmt.Sprintf("typeexpr.TypeConstraintVal(%#v)", *tyPtr) - }, - RawEquals: func(a, b interface{}) bool { - aPtr := a.(*cty.Type) - bPtr := b.(*cty.Type) - return (*aPtr).Equals(*bPtr) - }, - }) - - ConvertFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - { - Name: "type", - Type: TypeConstraintType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - wantTypePtr := args[1].EncapsulatedValue().(*cty.Type) - got, err := convert.Convert(args[0], *wantTypePtr) - if err != nil { - return cty.NilType, function.NewArgError(0, err) - } - return got.Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - v, err := convert.Convert(args[0], retType) - if err != nil { - return cty.NilVal, function.NewArgError(0, err) - } - return v, nil - }, - }) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go deleted file mode 100644 index f0d589d7..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go +++ /dev/null @@ -1,322 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// DecodeBody extracts the configuration within the given body into the given -// value. This value must be a non-nil pointer to either a struct or -// a map, where in the former case the configuration will be decoded using -// struct tags and in the latter case only attributes are allowed and their -// values are decoded into the map. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. 
This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. -func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { - rv := reflect.ValueOf(val) - if rv.Kind() != reflect.Ptr { - panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String())) - } - - return decodeBodyToValue(body, ctx, rv.Elem()) -} - -func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { - et := val.Type() - switch et.Kind() { - case reflect.Struct: - return decodeBodyToStruct(body, ctx, val) - case reflect.Map: - return decodeBodyToMap(body, ctx, val) - default: - panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String())) - } -} - -func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { - schema, partial := ImpliedBodySchema(val.Interface()) - - var content *hcl.BodyContent - var leftovers hcl.Body - var diags hcl.Diagnostics - if partial { - content, leftovers, diags = body.PartialContent(schema) - } else { - content, diags = body.Content(schema) - } - if content == nil { - return diags - } - - tags := getFieldTags(val.Type()) - - if tags.Remain != nil { - fieldIdx := *tags.Remain - field := val.Type().Field(fieldIdx) - fieldV := val.Field(fieldIdx) - switch { - case bodyType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(leftovers)) - case attrsType.AssignableTo(field.Type): - attrs, attrsDiags := leftovers.JustAttributes() - if len(attrsDiags) > 0 { - diags = append(diags, attrsDiags...) - } - fieldV.Set(reflect.ValueOf(attrs)) - default: - diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...) - } - } - - for name, fieldIdx := range tags.Attributes { - attr := content.Attributes[name] - field := val.Type().Field(fieldIdx) - fieldV := val.Field(fieldIdx) - - if attr == nil { - if !exprType.AssignableTo(field.Type) { - continue - } - - // As a special case, if the target is of type hcl.Expression then - // we'll assign an actual expression that evalues to a cty null, - // so the caller can deal with it within the cty realm rather - // than within the Go realm. - synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange()) - fieldV.Set(reflect.ValueOf(synthExpr)) - continue - } - - switch { - case attrType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(attr)) - case exprType.AssignableTo(field.Type): - fieldV.Set(reflect.ValueOf(attr.Expr)) - default: - diags = append(diags, DecodeExpression( - attr.Expr, ctx, fieldV.Addr().Interface(), - )...) 
- } - } - - blocksByType := content.Blocks.ByType() - - for typeName, fieldIdx := range tags.Blocks { - blocks := blocksByType[typeName] - field := val.Type().Field(fieldIdx) - - ty := field.Type - isSlice := false - isPtr := false - if ty.Kind() == reflect.Slice { - isSlice = true - ty = ty.Elem() - } - if ty.Kind() == reflect.Ptr { - isPtr = true - ty = ty.Elem() - } - - if len(blocks) > 1 && !isSlice { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", typeName), - Detail: fmt.Sprintf( - "Only one %s block is allowed. Another was defined at %s.", - typeName, blocks[0].DefRange.String(), - ), - Subject: &blocks[1].DefRange, - }) - continue - } - - if len(blocks) == 0 { - if isSlice || isPtr { - if val.Field(fieldIdx).IsNil() { - val.Field(fieldIdx).Set(reflect.Zero(field.Type)) - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", typeName), - Detail: fmt.Sprintf("A %s block is required.", typeName), - Subject: body.MissingItemRange().Ptr(), - }) - } - continue - } - - switch { - - case isSlice: - elemType := ty - if isPtr { - elemType = reflect.PtrTo(ty) - } - sli := val.Field(fieldIdx) - if sli.IsNil() { - sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) - } - - for i, block := range blocks { - if isPtr { - if i >= sli.Len() { - sli = reflect.Append(sli, reflect.New(ty)) - } - v := sli.Index(i) - if v.IsNil() { - v = reflect.New(ty) - } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - sli.Index(i).Set(v) - } else { - diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) - } - } - - if sli.Len() > len(blocks) { - sli.SetLen(len(blocks)) - } - - val.Field(fieldIdx).Set(sli) - - default: - block := blocks[0] - if isPtr { - v := val.Field(fieldIdx) - if v.IsNil() { - v = reflect.New(ty) - } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) - val.Field(fieldIdx).Set(v) - } else { - diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) - } - - } - - } - - return diags -} - -func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - attrs, diags := body.JustAttributes() - if attrs == nil { - return diags - } - - mv := reflect.MakeMap(v.Type()) - - for k, attr := range attrs { - switch { - case attrType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) - case exprType.AssignableTo(v.Type().Elem()): - mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) - default: - ev := reflect.New(v.Type().Elem()) - diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) - mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) - } - } - - v.Set(mv) - - return diags -} - -func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { - var diags hcl.Diagnostics - - ty := v.Type() - - switch { - case blockType.AssignableTo(ty): - v.Elem().Set(reflect.ValueOf(block)) - case bodyType.AssignableTo(ty): - v.Elem().Set(reflect.ValueOf(block.Body)) - case attrsType.AssignableTo(ty): - attrs, attrsDiags := block.Body.JustAttributes() - if len(attrsDiags) > 0 { - diags = append(diags, attrsDiags...) - } - v.Elem().Set(reflect.ValueOf(attrs)) - default: - diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) 
- - if len(block.Labels) > 0 { - blockTags := getFieldTags(ty) - for li, lv := range block.Labels { - lfieldIdx := blockTags.Labels[li].FieldIndex - v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) - } - } - - } - - return diags -} - -// DecodeExpression extracts the value of the given expression into the given -// value. This value must be something that gocty is able to decode into, -// since the final decoding is delegated to that package. -// -// The given EvalContext is used to resolve any variables or functions in -// expressions encountered while decoding. This may be nil to require only -// constant values, for simple applications that do not support variables or -// functions. -// -// The returned diagnostics should be inspected with its HasErrors method to -// determine if the populated value is valid and complete. If error diagnostics -// are returned then the given value may have been partially-populated but -// may still be accessed by a careful caller for static analysis and editor -// integration use-cases. -func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { - srcVal, diags := expr.Value(ctx) - - convTy, err := gocty.ImpliedType(val) - if err != nil { - panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err)) - } - - srcVal, err = convert.Convert(srcVal, convTy) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), - Subject: expr.StartRange().Ptr(), - Context: expr.Range().Ptr(), - }) - return diags - } - - err = gocty.FromCtyValue(srcVal, val) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), - Subject: expr.StartRange().Ptr(), - Context: expr.Range().Ptr(), - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go deleted file mode 100644 index aa3c6ea9..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go +++ /dev/null @@ -1,53 +0,0 @@ -// Package gohcl allows decoding HCL configurations into Go data structures. -// -// It provides a convenient and concise way of describing the schema for -// configuration and then accessing the resulting data via native Go -// types. -// -// A struct field tag scheme is used, similar to other decoding and -// unmarshalling libraries. The tags are formatted as in the following example: -// -// ThingType string `hcl:"thing_type,attr"` -// -// Within each tag there are two comma-separated tokens. The first is the -// name of the corresponding construct in configuration, while the second -// is a keyword giving the kind of construct expected. The following -// kind keywords are supported: -// -// attr (the default) indicates that the value is to be populated from an attribute -// block indicates that the value is to populated from a block -// label indicates that the value is to populated from a block label -// remain indicates that the value is to be populated from the remaining body after populating other fields -// -// "attr" fields may either be of type *hcl.Expression, in which case the raw -// expression is assigned, or of any type accepted by gocty, in which case -// gocty will be used to assign the value to a native Go type. 
-// -// "block" fields may be of type *hcl.Block or hcl.Body, in which case the -// corresponding raw value is assigned, or may be a struct that recursively -// uses the same tags. Block fields may also be slices of any of these types, -// in which case multiple blocks of the corresponding type are decoded into -// the slice. -// -// "label" fields are considered only in a struct used as the type of a field -// marked as "block", and are used sequentially to capture the labels of -// the blocks being decoded. In this case, the name token is used only as -// an identifier for the label in diagnostic messages. -// -// "remain" can be placed on a single field that may be either of type -// hcl.Body or hcl.Attributes, in which case any remaining body content is -// placed into this field for delayed processing. If no "remain" field is -// present then any attributes or blocks not matched by another valid tag -// will cause an error diagnostic. -// -// Only a subset of this tagging/typing vocabulary is supported for the -// "Encode" family of functions. See the EncodeIntoBody docs for full details -// on the constraints there. -// -// Broadly-speaking this package deals with two types of error. The first is -// errors in the configuration itself, which are returned as diagnostics -// written with the configuration author as the target audience. The second -// is bugs in the calling program, such as invalid struct tags, which are -// surfaced via panics since there can be no useful runtime handling of such -// errors and they should certainly not be returned to the user as diagnostics. -package gohcl diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go deleted file mode 100644 index d612e09c..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go +++ /dev/null @@ -1,191 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - - "github.com/hashicorp/hcl/v2/hclwrite" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EncodeIntoBody replaces the contents of the given hclwrite Body with -// attributes and blocks derived from the given value, which must be a -// struct value or a pointer to a struct value with the struct tags defined -// in this package. -// -// This function can work only with fully-decoded data. It will ignore any -// fields tagged as "remain", any fields that decode attributes into either -// hcl.Attribute or hcl.Expression values, and any fields that decode blocks -// into hcl.Attributes values. This function does not have enough information -// to complete the decoding of these types. -// -// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock -// to produce a whole hclwrite.Block including block labels. -// -// As long as a suitable value is given to encode and the destination body -// is non-nil, this function will always complete. It will panic in case of -// any errors in the calling program, such as passing an inappropriate type -// or a nil body. -// -// The layout of the resulting HCL source is derived from the ordering of -// the struct fields, with blank lines around nested blocks of different types. -// Fields representing attributes should usually precede those representing -// blocks so that the attributes can group togather in the result. For more -// control, use the hclwrite API directly. 
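Editor's aside, not part of the upstream diff: the gohcl package documentation removed above describes its struct-tag scheme for decoding. The sketch below decodes a small configuration with the upstream API; the `Config`/`Service` types and the HCL snippet are invented for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

type Config struct {
	IOMode   string    `hcl:"io_mode"`
	Services []Service `hcl:"service,block"`
}

type Service struct {
	Name string `hcl:"name,label"`
	Port int    `hcl:"listen_port"`
}

func main() {
	src := []byte(`
io_mode = "async"

service "http" {
  listen_port = 8080
}
`)
	f, diags := hclsyntax.ParseConfig(src, "config.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	var cfg Config
	if diags := gohcl.DecodeBody(f.Body, nil, &cfg); diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(cfg.IOMode, cfg.Services[0].Name, cfg.Services[0].Port)
}
```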
-func EncodeIntoBody(val interface{}, dst *hclwrite.Body) { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - populateBody(rv, ty, tags, dst) -} - -// EncodeAsBlock creates a new hclwrite.Block populated with the data from -// the given value, which must be a struct or pointer to struct with the -// struct tags defined in this package. -// -// If the given struct type has fields tagged with "label" tags then they -// will be used in order to annotate the created block with labels. -// -// This function has the same constraints as EncodeIntoBody and will panic -// if they are violated. -func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { - rv := reflect.ValueOf(val) - ty := rv.Type() - if ty.Kind() == reflect.Ptr { - rv = rv.Elem() - ty = rv.Type() - } - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) - } - - tags := getFieldTags(ty) - labels := make([]string, len(tags.Labels)) - for i, lf := range tags.Labels { - lv := rv.Field(lf.FieldIndex) - // We just stringify whatever we find. It should always be a string - // but if not then we'll still do something reasonable. - labels[i] = fmt.Sprintf("%s", lv.Interface()) - } - - block := hclwrite.NewBlock(blockType, labels) - populateBody(rv, ty, tags, block.Body()) - return block -} - -func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { - nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) - namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) - for n, i := range tags.Attributes { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - for n, i := range tags.Blocks { - nameIdxs[n] = i - namesOrder = append(namesOrder, n) - } - sort.SliceStable(namesOrder, func(i, j int) bool { - ni, nj := namesOrder[i], namesOrder[j] - return nameIdxs[ni] < nameIdxs[nj] - }) - - dst.Clear() - - prevWasBlock := false - for _, name := range namesOrder { - fieldIdx := nameIdxs[name] - field := ty.Field(fieldIdx) - fieldTy := field.Type - fieldVal := rv.Field(fieldIdx) - - if fieldTy.Kind() == reflect.Ptr { - fieldTy = fieldTy.Elem() - fieldVal = fieldVal.Elem() - } - - if _, isAttr := tags.Attributes[name]; isAttr { - - if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { - continue // ignore undecoded fields - } - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - if prevWasBlock { - dst.AppendNewline() - prevWasBlock = false - } - - valTy, err := gocty.ImpliedType(fieldVal.Interface()) - if err != nil { - panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) - } - - val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) - if err != nil { - // This should never happen, since we should always be able - // to decode into the implied type. 
- panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) - } - - dst.SetAttributeValue(name, val) - - } else { // must be a block, then - elemTy := fieldTy - isSeq := false - if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { - isSeq = true - elemTy = elemTy.Elem() - } - - if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { - continue // ignore undecoded fields - } - prevWasBlock = false - - if isSeq { - l := fieldVal.Len() - for i := 0; i < l; i++ { - elemVal := fieldVal.Index(i) - if !elemVal.IsValid() { - continue // ignore (elem value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(elemVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } else { - if !fieldVal.IsValid() { - continue // ignore (field value is nil pointer) - } - if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { - continue // ignore - } - block := EncodeAsBlock(fieldVal.Interface(), name) - if !prevWasBlock { - dst.AppendNewline() - prevWasBlock = true - } - dst.AppendBlock(block) - } - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go deleted file mode 100644 index ecf6ddba..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go +++ /dev/null @@ -1,174 +0,0 @@ -package gohcl - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the -// given value, which must be a struct value or a pointer to one. If an -// inappropriate value is passed, this function will panic. -// -// The second return argument indicates whether the given struct includes -// a "remain" field, and thus the returned schema is non-exhaustive. -// -// This uses the tags on the fields of the struct to discover how each -// field's value should be expressed within configuration. If an invalid -// mapping is attempted, this function will panic. -func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { - ty := reflect.TypeOf(val) - - if ty.Kind() == reflect.Ptr { - ty = ty.Elem() - } - - if ty.Kind() != reflect.Struct { - panic(fmt.Sprintf("given value must be struct, not %T", val)) - } - - var attrSchemas []hcl.AttributeSchema - var blockSchemas []hcl.BlockHeaderSchema - - tags := getFieldTags(ty) - - attrNames := make([]string, 0, len(tags.Attributes)) - for n := range tags.Attributes { - attrNames = append(attrNames, n) - } - sort.Strings(attrNames) - for _, n := range attrNames { - idx := tags.Attributes[n] - optional := tags.Optional[n] - field := ty.Field(idx) - - var required bool - - switch { - case field.Type.AssignableTo(exprType): - // If we're decoding to hcl.Expression then absense can be - // indicated via a null value, so we don't specify that - // the field is required during decoding. 
- required = false - case field.Type.Kind() != reflect.Ptr && !optional: - required = true - default: - required = false - } - - attrSchemas = append(attrSchemas, hcl.AttributeSchema{ - Name: n, - Required: required, - }) - } - - blockNames := make([]string, 0, len(tags.Blocks)) - for n := range tags.Blocks { - blockNames = append(blockNames, n) - } - sort.Strings(blockNames) - for _, n := range blockNames { - idx := tags.Blocks[n] - field := ty.Field(idx) - fty := field.Type - if fty.Kind() == reflect.Slice { - fty = fty.Elem() - } - if fty.Kind() == reflect.Ptr { - fty = fty.Elem() - } - if fty.Kind() != reflect.Struct { - panic(fmt.Sprintf( - "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, - )) - } - ftags := getFieldTags(fty) - var labelNames []string - if len(ftags.Labels) > 0 { - labelNames = make([]string, len(ftags.Labels)) - for i, l := range ftags.Labels { - labelNames[i] = l.Name - } - } - - blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ - Type: n, - LabelNames: labelNames, - }) - } - - partial = tags.Remain != nil - schema = &hcl.BodySchema{ - Attributes: attrSchemas, - Blocks: blockSchemas, - } - return schema, partial -} - -type fieldTags struct { - Attributes map[string]int - Blocks map[string]int - Labels []labelField - Remain *int - Optional map[string]bool -} - -type labelField struct { - FieldIndex int - Name string -} - -func getFieldTags(ty reflect.Type) *fieldTags { - ret := &fieldTags{ - Attributes: map[string]int{}, - Blocks: map[string]int{}, - Optional: map[string]bool{}, - } - - ct := ty.NumField() - for i := 0; i < ct; i++ { - field := ty.Field(i) - tag := field.Tag.Get("hcl") - if tag == "" { - continue - } - - comma := strings.Index(tag, ",") - var name, kind string - if comma != -1 { - name = tag[:comma] - kind = tag[comma+1:] - } else { - name = tag - kind = "attr" - } - - switch kind { - case "attr": - ret.Attributes[name] = i - case "block": - ret.Blocks[name] = i - case "label": - ret.Labels = append(ret.Labels, labelField{ - FieldIndex: i, - Name: name, - }) - case "remain": - if ret.Remain != nil { - panic("only one 'remain' tag is permitted") - } - idx := i // copy, because this loop will continue assigning to i - ret.Remain = &idx - case "optional": - ret.Attributes[name] = i - ret.Optional[name] = true - default: - panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) - } - } - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go deleted file mode 100644 index a8d00f8f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go +++ /dev/null @@ -1,16 +0,0 @@ -package gohcl - -import ( - "reflect" - - "github.com/hashicorp/hcl/v2" -) - -var victimExpr hcl.Expression -var victimBody hcl.Body - -var exprType = reflect.TypeOf(&victimExpr).Elem() -var bodyType = reflect.TypeOf(&victimBody).Elem() -var blockType = reflect.TypeOf((*hcl.Block)(nil)) -var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) -var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go deleted file mode 100644 index 71de4519..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go +++ /dev/null @@ -1,21 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -type blockLabel struct { - Value string - Range hcl.Range -} - -func 
labelsForBlock(block *hcl.Block) []blockLabel { - ret := make([]blockLabel, len(block.Labels)) - for i := range block.Labels { - ret[i] = blockLabel{ - Value: block.Labels[i], - Range: block.LabelRanges[i], - } - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go deleted file mode 100644 index c6e42236..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { - schema := ImpliedSchema(spec) - - var content *hcl.BodyContent - var diags hcl.Diagnostics - var leftovers hcl.Body - - if partial { - content, leftovers, diags = body.PartialContent(schema) - } else { - content, diags = body.Content(schema) - } - - val, valDiags := spec.decode(content, blockLabels, ctx) - diags = append(diags, valDiags...) - - return val, leftovers, diags -} - -func impliedType(spec Spec) cty.Type { - return spec.impliedType() -} - -func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - return spec.sourceRange(content, blockLabels) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go deleted file mode 100644 index 23bfe542..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package hcldec provides a higher-level API for unpacking the content of -// HCL bodies, implemented in terms of the low-level "Content" API exposed -// by the bodies themselves. -// -// It allows decoding an entire nested configuration in a single operation -// by providing a description of the intended structure. -// -// For some applications it may be more convenient to use the "gohcl" -// package, which has a similar purpose but decodes directly into native -// Go data types. hcldec instead targets the cty type system, and thus allows -// a cty-driven application to remain within that type system. -package hcldec diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go deleted file mode 100644 index e2027cfd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go +++ /dev/null @@ -1,23 +0,0 @@ -package hcldec - -import ( - "encoding/gob" -) - -func init() { - // Every Spec implementation should be registered with gob, so that - // specs can be sent over gob channels, such as using - // github.com/hashicorp/go-plugin with plugins that need to describe - // what shape of configuration they are expecting. 
- gob.Register(ObjectSpec(nil)) - gob.Register(TupleSpec(nil)) - gob.Register((*AttrSpec)(nil)) - gob.Register((*LiteralSpec)(nil)) - gob.Register((*ExprSpec)(nil)) - gob.Register((*BlockSpec)(nil)) - gob.Register((*BlockListSpec)(nil)) - gob.Register((*BlockSetSpec)(nil)) - gob.Register((*BlockMapSpec)(nil)) - gob.Register((*BlockLabelSpec)(nil)) - gob.Register((*DefaultSpec)(nil)) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go deleted file mode 100644 index 1fa548d0..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go +++ /dev/null @@ -1,81 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// Decode interprets the given body using the given specification and returns -// the resulting value. If the given body is not valid per the spec, error -// diagnostics are returned and the returned value is likely to be incomplete. -// -// The ctx argument may be nil, in which case any references to variables or -// functions will produce error diagnostics. -func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, _, diags := decode(body, nil, ctx, spec, false) - return val, diags -} - -// PartialDecode is like Decode except that it permits "leftover" items in -// the top-level body, which are returned as a new body to allow for -// further processing. -// -// Any descendent block bodies are _not_ decoded partially and thus must -// be fully described by the given specification. -func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { - return decode(body, nil, ctx, spec, true) -} - -// ImpliedType returns the value type that should result from decoding the -// given spec. -func ImpliedType(spec Spec) cty.Type { - return impliedType(spec) -} - -// SourceRange interprets the given body using the given specification and -// then returns the source range of the value that would be used to -// fulfill the spec. -// -// This can be used if application-level validation detects value errors, to -// obtain a reasonable SourceRange to use for generated diagnostics. It works -// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) -// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will -// be less useful the broader the specification, so e.g. a spec that returns -// the entirety of all of the blocks of a given type is likely to be -// _particularly_ arbitrary and useless. -// -// If the given body is not valid per the given spec, the result is best-effort -// and may not actually be something ideal. It's expected that an application -// will already have used Decode or PartialDecode earlier and thus had an -// opportunity to detect and report spec violations. -func SourceRange(body hcl.Body, spec Spec) hcl.Range { - return sourceRange(body, nil, spec) -} - -// ChildBlockTypes returns a map of all of the child block types declared -// by the given spec, with block type names as keys and the associated -// nested body specs as values. -func ChildBlockTypes(spec Spec) map[string]Spec { - ret := map[string]Spec{} - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. 
- var visit visitFunc - visit = func(s Spec) { - if bs, ok := s.(blockSpec); ok { - for _, blockS := range bs.blockHeaderSchemata() { - nested := bs.nestedSpec() - if nested != nil { // nil can be returned to dynamically opt out of this interface - ret[blockS.Type] = nested - } - } - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go deleted file mode 100644 index ddbe7fa4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. -// This is the schema that the Decode function will use internally to -// access the content of a given body. -func ImpliedSchema(spec Spec) *hcl.BodySchema { - var attrs []hcl.AttributeSchema - var blocks []hcl.BlockHeaderSchema - - // visitSameBodyChildren walks through the spec structure, calling - // the given callback for each descendent spec encountered. We are - // interested in the specs that reference attributes and blocks. - var visit visitFunc - visit = func(s Spec) { - if as, ok := s.(attrSpec); ok { - attrs = append(attrs, as.attrSchemata()...) - } - - if bs, ok := s.(blockSpec); ok { - blocks = append(blocks, bs.blockHeaderSchemata()...) - } - - s.visitSameBodyChildren(visit) - } - - visit(spec) - - return &hcl.BodySchema{ - Attributes: attrs, - Blocks: blocks, - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go deleted file mode 100644 index a70818e1..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go +++ /dev/null @@ -1,1591 +0,0 @@ -package hcldec - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/customdecode" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// A Spec is a description of how to decode a hcl.Body to a cty.Value. -// -// The various other types in this package whose names end in "Spec" are -// the spec implementations. The most common top-level spec is ObjectSpec, -// which decodes body content into a cty.Value of an object type. -type Spec interface { - // Perform the decode operation on the given body, in the context of - // the given block (which might be null), using the given eval context. - // - // "block" is provided only by the nested calls performed by the spec - // types that work on block bodies. - decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) - - // Return the cty.Type that should be returned when decoding a body with - // this spec. - impliedType() cty.Type - - // Call the given callback once for each of the nested specs that would - // get decoded with the same body and block as the receiver. This should - // not descend into the nested specs used when decoding blocks. - visitSameBodyChildren(cb visitFunc) - - // Determine the source range of the value that would be returned for the - // spec in the given content, in the context of the given block - // (which might be null). If the corresponding item is missing, return - // a place where it might be inserted. 
- sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range -} - -type visitFunc func(spec Spec) - -// An ObjectSpec is a Spec that produces a cty.Value of an object type whose -// attributes correspond to the keys of the spec map. -type ObjectSpec map[string]Spec - -// attrSpec is implemented by specs that require attributes from the body. -type attrSpec interface { - attrSchemata() []hcl.AttributeSchema -} - -// blockSpec is implemented by specs that require blocks from the body. -type blockSpec interface { - blockHeaderSchemata() []hcl.BlockHeaderSchema - nestedSpec() Spec -} - -// specNeedingVariables is implemented by specs that can use variables -// from the EvalContext, to declare which variables they need. -type specNeedingVariables interface { - variablesNeeded(content *hcl.BodyContent) []hcl.Traversal -} - -func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make(map[string]cty.Value, len(s)) - var diags hcl.Diagnostics - - for k, spec := range s { - var kd hcl.Diagnostics - vals[k], kd = spec.decode(content, blockLabels, ctx) - diags = append(diags, kd...) - } - - return cty.ObjectVal(vals), diags -} - -func (s ObjectSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyObject - } - - attrTypes := make(map[string]cty.Type) - for k, childSpec := range s { - attrTypes[k] = childSpec.impliedType() - } - return cty.Object(attrTypes) -} - -func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose -// elements correspond to the elements of the spec slice. -type TupleSpec []Spec - -func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { - for _, c := range s { - cb(c) - } -} - -func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - vals := make([]cty.Value, len(s)) - var diags hcl.Diagnostics - - for i, spec := range s { - var ed hcl.Diagnostics - vals[i], ed = spec.decode(content, blockLabels, ctx) - diags = append(diags, ed...) - } - - return cty.TupleVal(vals), diags -} - -func (s TupleSpec) impliedType() cty.Type { - if len(s) == 0 { - return cty.EmptyTuple - } - - attrTypes := make([]cty.Type, len(s)) - for i, childSpec := range s { - attrTypes[i] = childSpec.impliedType() - } - return cty.Tuple(attrTypes) -} - -func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // This is not great, but the best we can do. In practice, it's rather - // strange to ask for the source range of an entire top-level body, since - // that's already readily available to the caller. - return content.MissingItemRange -} - -// An AttrSpec is a Spec that evaluates a particular attribute expression in -// the body and returns its resulting value converted to the requested type, -// or produces a diagnostic if the type is incorrect. 
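// Annotation (not part of the deleted vendor file): the spec types removed below are
// easiest to follow from the caller's side. This is a minimal sketch of how AttrSpec and
// ObjectSpec combine with hcldec.Decode, using only the APIs visible in this diff; the
// file name and the "name"/"port" attributes are illustrative assumptions, not anything
// this provider actually configures.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("name = \"example\"\nport = 8080\n")

	// Parse native HCL syntax into an hcl.File whose Body we can decode.
	file, diags := hclparse.NewParser().ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// Each AttrSpec pulls one attribute and converts it to the requested cty type.
	spec := hcldec.ObjectSpec{
		"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		"port": &hcldec.AttrSpec{Name: "port", Type: cty.Number},
	}

	// A nil EvalContext means any variable or function references become error diagnostics.
	val, diags := hcldec.Decode(file.Body, spec, nil)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(val.GetAttr("name").AsString())
}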
-type AttrSpec struct { - Name string - Type cty.Type - Required bool -} - -func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - attr, exists := content.Attributes[s.Name] - if !exists { - return nil - } - - return attr.Expr.Variables() -} - -// attrSpec implementation -func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { - return []hcl.AttributeSchema{ - { - Name: s.Name, - Required: s.Required, - }, - } -} - -func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - attr, exists := content.Attributes[s.Name] - if !exists { - return content.MissingItemRange - } - - return attr.Expr.Range() -} - -func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - attr, exists := content.Attributes[s.Name] - if !exists { - // We don't need to check required and emit a diagnostic here, because - // that would already have happened when building "content". - return cty.NullVal(s.Type), nil - } - - if decodeFn := customdecode.CustomExpressionDecoderForType(s.Type); decodeFn != nil { - v, diags := decodeFn(attr.Expr, ctx) - if v == cty.NilVal { - v = cty.UnknownVal(s.Type) - } - return v, diags - } - - val, diags := attr.Expr.Value(ctx) - - convVal, err := convert.Convert(val, s.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect attribute value type", - Detail: fmt.Sprintf( - "Inappropriate value for attribute %q: %s.", - s.Name, err.Error(), - ), - Subject: attr.Expr.Range().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.Range()).Ptr(), - Expression: attr.Expr, - EvalContext: ctx, - }) - // We'll return an unknown value of the _correct_ type so that the - // incomplete result can still be used for some analysis use-cases. - val = cty.UnknownVal(s.Type) - } else { - val = convVal - } - - return val, diags -} - -func (s *AttrSpec) impliedType() cty.Type { - return s.Type -} - -// A LiteralSpec is a Spec that produces the given literal value, ignoring -// the given body. -type LiteralSpec struct { - Value cty.Value -} - -func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Value, nil -} - -func (s *LiteralSpec) impliedType() cty.Type { - return s.Value.Type() -} - -func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No sensible range to return for a literal, so the caller had better - // ensure it doesn't cause any diagnostics. - return hcl.Range{ - Filename: "", - } -} - -// An ExprSpec is a Spec that evaluates the given expression, ignoring the -// given body. 
-type ExprSpec struct { - Expr hcl.Expression -} - -func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// specNeedingVariables implementation -func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - return s.Expr.Variables() -} - -func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return s.Expr.Value(ctx) -} - -func (s *ExprSpec) impliedType() cty.Type { - // We can't know the type of our expression until we evaluate it - return cty.DynamicPseudoType -} - -func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - return s.Expr.Range() -} - -// A BlockSpec is a Spec that produces a cty.Value by decoding the contents -// of a single nested block of a given type, using a nested spec. -// -// If the Required flag is not set, the nested block may be omitted, in which -// case a null value is produced. If it _is_ set, an error diagnostic is -// produced if there are no nested blocks of the given type. -type BlockSpec struct { - TypeName string - Nested Spec - Required bool -} - -func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return nil - } - - return Variables(childBlock.Body, s.Nested) -} - -func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - if childBlock != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, childBlock.DefRange.String(), - ), - Subject: &candidate.DefRange, - }) - break - } - - childBlock = candidate - } - - if childBlock == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(s.Nested.impliedType()), diags - } - - if s.Nested == nil { - panic("BlockSpec with no Nested Spec") - } - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- return val, diags -} - -func (s *BlockSpec) impliedType() cty.Type { - return s.Nested.impliedType() -} - -func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockListSpec is a Spec that produces a cty list of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockListSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockListSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockListSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) - elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.ListValEmpty(s.Nested.impliedType()) - } else { - // Since our target is a list, all of the decoded elements must have the - // same type or cty.ListVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertable to a single type - // in order for the result to be considered valid. - etys := make([]cty.Type, len(elems)) - for i, v := range elems { - etys[i] = v.Type() - } - ety, convs := convert.UnifyUnsafe(etys) - if ety == cty.NilType { - // FIXME: This is a pretty terrible error message. 
- diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: "Corresponding attributes in all blocks of this type must be the same.", - Subject: &sourceRanges[0], - }) - return cty.DynamicVal, diags - } - for i, v := range elems { - if convs[i] != nil { - newV, err := convs[i](v) - if err != nil { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), - Subject: &sourceRanges[i], - }) - // Bail early here so we won't panic below in cty.ListVal - return cty.DynamicVal, diags - } - elems[i] = newV - } - } - - ret = cty.ListVal(elems) - } - - return ret, diags -} - -func (s *BlockListSpec) impliedType() cty.Type { - return cty.List(s.Nested.impliedType()) -} - -func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockTupleSpec is a Spec that produces a cty tuple of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// This is similar to BlockListSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockTupleSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockTupleSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockListSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.EmptyTupleVal - } else { - ret = cty.TupleVal(elems) - } - - return ret, diags -} - -func (s *BlockTupleSpec) impliedType() cty.Type { - // We can't predict our type, because we don't know how many blocks - // there will be until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockSetSpec is a Spec that produces a cty set of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -type BlockSetSpec struct { - TypeName string - Nested Spec - MinItems int - MaxItems int -} - -func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: findLabelSpecs(s.Nested), - }, - } -} - -// blockSpec implementation -func (s *BlockSetSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockSetSpec with no Nested Spec") - } - - var elems []cty.Value - var sourceRanges []hcl.Range - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) - diags = append(diags, childDiags...) 
- elems = append(elems, val) - sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) - } - - if len(elems) < s.MinItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), - Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), - Subject: &content.MissingItemRange, - }) - } else if s.MaxItems > 0 && len(elems) > s.MaxItems { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), - Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), - Subject: &sourceRanges[s.MaxItems], - }) - } - - var ret cty.Value - - if len(elems) == 0 { - ret = cty.SetValEmpty(s.Nested.impliedType()) - } else { - // Since our target is a set, all of the decoded elements must have the - // same type or cty.SetVal will panic below. Different types can arise - // if there is an attribute spec of type cty.DynamicPseudoType in the - // nested spec; all given values must be convertable to a single type - // in order for the result to be considered valid. - etys := make([]cty.Type, len(elems)) - for i, v := range elems { - etys[i] = v.Type() - } - ety, convs := convert.UnifyUnsafe(etys) - if ety == cty.NilType { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: "Corresponding attributes in all blocks of this type must be the same.", - Subject: &sourceRanges[0], - }) - return cty.DynamicVal, diags - } - for i, v := range elems { - if convs[i] != nil { - newV, err := convs[i](v) - if err != nil { - // FIXME: This is a pretty terrible error message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), - Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), - Subject: &sourceRanges[i], - }) - // Bail early here so we won't panic below in cty.ListVal - return cty.DynamicVal, diags - } - elems[i] = newV - } - } - - ret = cty.SetVal(elems) - } - - return ret, diags -} - -func (s *BlockSetSpec) impliedType() cty.Type { - return cty.Set(s.Nested.impliedType()) -} - -func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockMapSpec is a Spec that produces a cty map of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of map structure is created for each of the given label names. -// There must be at least one given label name. 
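// Annotation (not part of the deleted vendor file): a rough sketch of how BlockMapSpec
// turns labeled blocks into nested maps, one map level per label name as the comment
// above describes. The "service" block type and its "env"/"name" labels are illustrative
// assumptions only.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`
service "prod" "api" {
  port = 443
}
service "dev" "api" {
  port = 8080
}
`)
	file, diags := hclparse.NewParser().ParseHCL(src, "services.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// Two label names produce two levels of map: result["prod"]["api"], result["dev"]["api"].
	spec := &hcldec.BlockMapSpec{
		TypeName:   "service",
		LabelNames: []string{"env", "name"},
		Nested: hcldec.ObjectSpec{
			"port": &hcldec.AttrSpec{Name: "port", Type: cty.Number, Required: true},
		},
	}

	val, diags := hcldec.Decode(file.Body, spec, nil)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	port := val.AsValueMap()["prod"].AsValueMap()["api"].GetAttr("port")
	fmt.Println(port.AsBigFloat()) // 443
}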
-type BlockMapSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockMapSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockMapSpec with no Nested Spec") - } - if ImpliedType(s).HasDynamicTypes() { - panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.MapValEmpty(s.Nested.impliedType()), diags - } - - var ctyMap func(map[string]interface{}, int) cty.Value - ctyMap = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyMap(v.(map[string]interface{}), depth-1) - } - } - return cty.MapVal(vals) - } - - return ctyMap(elems, len(s.LabelNames)), diags -} - -func (s *BlockMapSpec) impliedType() cty.Type { - ret := s.Nested.impliedType() - for _ = range s.LabelNames { - ret = cty.Map(ret) - } - return ret -} - -func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. 
- - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockObjectSpec is a Spec that produces a cty object of the results of -// decoding all of the nested blocks of a given type, using a nested spec. -// -// One level of object structure is created for each of the given label names. -// There must be at least one given label name. -// -// This is similar to BlockMapSpec, but it permits the nested blocks to have -// different result types in situations where cty.DynamicPseudoType attributes -// are present. -type BlockObjectSpec struct { - TypeName string - LabelNames []string - Nested Spec -} - -func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node ("Nested" does not use the same body) -} - -// blockSpec implementation -func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), - }, - } -} - -// blockSpec implementation -func (s *BlockObjectSpec) nestedSpec() Spec { - return s.Nested -} - -// specNeedingVariables implementation -func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - var ret []hcl.Traversal - - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - ret = append(ret, Variables(childBlock.Body, s.Nested)...) - } - - return ret -} - -func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - if s.Nested == nil { - panic("BlockObjectSpec with no Nested Spec") - } - - elems := map[string]interface{}{} - for _, childBlock := range content.Blocks { - if childBlock.Type != s.TypeName { - continue - } - - childLabels := labelsForBlock(childBlock) - val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) - targetMap := elems - for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { - if _, exists := targetMap[key]; !exists { - targetMap[key] = make(map[string]interface{}) - } - targetMap = targetMap[key].(map[string]interface{}) - } - - diags = append(diags, childDiags...) - - key := childBlock.Labels[len(s.LabelNames)-1] - if _, exists := targetMap[key]; exists { - labelsBuf := bytes.Buffer{} - for _, label := range childBlock.Labels { - fmt.Fprintf(&labelsBuf, " %q", label) - } - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block for %s%s was already defined. 
The %s labels must be unique.", - s.TypeName, labelsBuf.String(), s.TypeName, - ), - Subject: &childBlock.DefRange, - }) - continue - } - - targetMap[key] = val - } - - if len(elems) == 0 { - return cty.EmptyObjectVal, diags - } - - var ctyObj func(map[string]interface{}, int) cty.Value - ctyObj = func(raw map[string]interface{}, depth int) cty.Value { - vals := make(map[string]cty.Value, len(raw)) - if depth == 1 { - for k, v := range raw { - vals[k] = v.(cty.Value) - } - } else { - for k, v := range raw { - vals[k] = ctyObj(v.(map[string]interface{}), depth-1) - } - } - return cty.ObjectVal(vals) - } - - return ctyObj(elems, len(s.LabelNames)), diags -} - -func (s *BlockObjectSpec) impliedType() cty.Type { - // We can't predict our type, since we don't know how many blocks are - // present and what labels they have until we decode. - return cty.DynamicPseudoType -} - -func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We return the source range of the _first_ block of the given type, - // since they are not guaranteed to form a contiguous range. - - var childBlock *hcl.Block - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - - childBlock = candidate - break - } - - if childBlock == nil { - return content.MissingItemRange - } - - return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) -} - -// A BlockAttrsSpec is a Spec that interprets a single block as if it were -// a map of some element type. That is, each attribute within the block -// becomes a key in the resulting map and the attribute's value becomes the -// element value, after conversion to the given element type. The resulting -// value is a cty.Map of the given element type. -// -// This spec imposes a validation constraint that there be exactly one block -// of the given type name and that this block may contain only attributes. The -// block does not accept any labels. -// -// This is an alternative to an AttrSpec of a map type for situations where -// block syntax is desired. Note that block syntax does not permit dynamic -// keys, construction of the result via a "for" expression, etc. In most cases -// an AttrSpec is preferred if the desired result is a map whose keys are -// chosen by the user rather than by schema. -type BlockAttrsSpec struct { - TypeName string - ElementType cty.Type - Required bool -} - -func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -// blockSpec implementation -func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - return []hcl.BlockHeaderSchema{ - { - Type: s.TypeName, - LabelNames: nil, - }, - } -} - -// blockSpec implementation -func (s *BlockAttrsSpec) nestedSpec() Spec { - // This is an odd case: we aren't actually going to apply a nested spec - // in this case, since we're going to interpret the body directly as - // attributes, but we need to return something non-nil so that the - // decoder will recognize this as a block spec. We won't actually be - // using this for anything at decode time. - return noopSpec{} -} - -// specNeedingVariables implementation -func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { - - block, _ := s.findBlock(content) - if block == nil { - return nil - } - - var vars []hcl.Traversal - - attrs, diags := block.Body.JustAttributes() - if diags.HasErrors() { - return nil - } - - for _, attr := range attrs { - vars = append(vars, attr.Expr.Variables()...) 
- } - - // We'll return the variables references in source order so that any - // error messages that result are also in source order. - sort.Slice(vars, func(i, j int) bool { - return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte - }) - - return vars -} - -func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - var diags hcl.Diagnostics - - block, other := s.findBlock(content) - if block == nil { - if s.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Missing %s block", s.TypeName), - Detail: fmt.Sprintf( - "A block of type %q is required here.", s.TypeName, - ), - Subject: &content.MissingItemRange, - }) - } - return cty.NullVal(cty.Map(s.ElementType)), diags - } - if other != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), - Detail: fmt.Sprintf( - "Only one block of type %q is allowed. Previous definition was at %s.", - s.TypeName, block.DefRange.String(), - ), - Subject: &other.DefRange, - }) - } - - attrs, attrDiags := block.Body.JustAttributes() - diags = append(diags, attrDiags...) - - if len(attrs) == 0 { - return cty.MapValEmpty(s.ElementType), diags - } - - vals := make(map[string]cty.Value, len(attrs)) - for name, attr := range attrs { - if decodeFn := customdecode.CustomExpressionDecoderForType(s.ElementType); decodeFn != nil { - attrVal, attrDiags := decodeFn(attr.Expr, ctx) - diags = append(diags, attrDiags...) - if attrVal == cty.NilVal { - attrVal = cty.UnknownVal(s.ElementType) - } - vals[name] = attrVal - continue - } - - attrVal, attrDiags := attr.Expr.Value(ctx) - diags = append(diags, attrDiags...) - - attrVal, err := convert.Convert(attrVal, s.ElementType) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid attribute value", - Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), - Subject: attr.Expr.Range().Ptr(), - Context: hcl.RangeBetween(attr.NameRange, attr.Expr.Range()).Ptr(), - Expression: attr.Expr, - EvalContext: ctx, - }) - attrVal = cty.UnknownVal(s.ElementType) - } - - vals[name] = attrVal - } - - return cty.MapVal(vals), diags -} - -func (s *BlockAttrsSpec) impliedType() cty.Type { - return cty.Map(s.ElementType) -} - -func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - block, _ := s.findBlock(content) - if block == nil { - return content.MissingItemRange - } - return block.DefRange -} - -func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { - for _, candidate := range content.Blocks { - if candidate.Type != s.TypeName { - continue - } - if block != nil { - return block, candidate - } - block = candidate - } - - return block, nil -} - -// A BlockLabelSpec is a Spec that returns a cty.String representing the -// label of the block its given body belongs to, if indeed its given body -// belongs to a block. It is a programming error to use this in a non-block -// context, so this spec will panic in that case. -// -// This spec only works in the nested spec within a BlockSpec, BlockListSpec, -// BlockSetSpec or BlockMapSpec. -// -// The full set of label specs used against a particular block must have a -// consecutive set of indices starting at zero. 
The maximum index found -// defines how many labels the corresponding blocks must have in cty source. -type BlockLabelSpec struct { - Index int - Name string -} - -func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) { - // leaf node -} - -func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - if s.Index >= len(blockLabels) { - panic("BlockListSpec used in non-block context") - } - - return cty.StringVal(blockLabels[s.Index].Value), nil -} - -func (s *BlockLabelSpec) impliedType() cty.Type { - return cty.String // labels are always strings -} - -func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - if s.Index >= len(blockLabels) { - panic("BlockListSpec used in non-block context") - } - - return blockLabels[s.Index].Range -} - -func findLabelSpecs(spec Spec) []string { - maxIdx := -1 - var names map[int]string - - var visit visitFunc - visit = func(s Spec) { - if ls, ok := s.(*BlockLabelSpec); ok { - if maxIdx < ls.Index { - maxIdx = ls.Index - } - if names == nil { - names = make(map[int]string) - } - names[ls.Index] = ls.Name - } - s.visitSameBodyChildren(visit) - } - - visit(spec) - - if maxIdx < 0 { - return nil // no labels at all - } - - ret := make([]string, maxIdx+1) - for i := range ret { - name := names[i] - if name == "" { - // Should never happen if the spec is conformant, since we require - // consecutive indices starting at zero. - name = fmt.Sprintf("missing%02d", i) - } - ret[i] = name - } - - return ret -} - -// DefaultSpec is a spec that wraps two specs, evaluating the primary first -// and then evaluating the default if the primary returns a null value. -// -// The two specifications must have the same implied result type for correct -// operation. If not, the result is undefined. -// -// Any requirements imposed by the "Default" spec apply even if "Primary" does -// not return null. For example, if the "Default" spec is for a required -// attribute then that attribute is always required, regardless of the result -// of the "Primary" spec. -// -// The "Default" spec must not describe a nested block, since otherwise the -// result of ChildBlockTypes would not be decidable without evaluation. If -// the default spec _does_ describe a nested block then the result is -// undefined. -type DefaultSpec struct { - Primary Spec - Default Spec -} - -func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Primary) - cb(s.Default) -} - -func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - val, diags := s.Primary.decode(content, blockLabels, ctx) - if val.IsNull() { - var moreDiags hcl.Diagnostics - val, moreDiags = s.Default.decode(content, blockLabels, ctx) - diags = append(diags, moreDiags...) - } - return val, diags -} - -func (s *DefaultSpec) impliedType() cty.Type { - return s.Primary.impliedType() -} - -// attrSpec implementation -func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema { - // We must pass through the union of both of our nested specs so that - // we'll have both values available in the result. - var ret []hcl.AttributeSchema - if as, ok := s.Primary.(attrSpec); ok { - ret = append(ret, as.attrSchemata()...) - } - if as, ok := s.Default.(attrSpec); ok { - ret = append(ret, as.attrSchemata()...) 
- } - return ret -} - -// blockSpec implementation -func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { - // Only the primary spec may describe a block, since otherwise - // our nestedSpec method below can't know which to return. - if bs, ok := s.Primary.(blockSpec); ok { - return bs.blockHeaderSchemata() - } - return nil -} - -// blockSpec implementation -func (s *DefaultSpec) nestedSpec() Spec { - if bs, ok := s.Primary.(blockSpec); ok { - return bs.nestedSpec() - } - return nil -} - -func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We can't tell from here which of the two specs will ultimately be used - // in our result, so we'll just assume the first. This is usually the right - // choice because the default is often a literal spec that doesn't have a - // reasonable source range to return anyway. - return s.Primary.sourceRange(content, blockLabels) -} - -// TransformExprSpec is a spec that wraps another and then evaluates a given -// hcl.Expression on the result. -// -// The implied type of this spec is determined by evaluating the expression -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -type TransformExprSpec struct { - Wrapped Spec - Expr hcl.Expression - TransformCtx *hcl.EvalContext - VarName string -} - -func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: wrappedVal, - } - resultVal, resultDiags := s.Expr.Value(chiCtx) - diags = append(diags, resultDiags...) - return resultVal, diags -} - -func (s *TransformExprSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - chiCtx := s.TransformCtx.NewChild() - chiCtx.Variables = map[string]cty.Value{ - s.VarName: cty.UnknownVal(wrappedTy), - } - resultVal, _ := s.Expr.Value(chiCtx) - return resultVal.Type() -} - -func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// TransformFuncSpec is a spec that wraps another and then evaluates a given -// cty function with the result. The given function must expect exactly one -// argument, where the result of the wrapped spec will be passed. -// -// The implied type of this spec is determined by type-checking the function -// with an unknown value of the nested spec's implied type, which may cause -// the result to be imprecise. This spec should not be used in situations where -// precise result type information is needed. -// -// If the given function produces an error when run, this spec will produce -// a non-user-actionable diagnostic message. 
It's the caller's responsibility -// to ensure that the given function cannot fail for any non-error result -// of the wrapped spec. -type TransformFuncSpec struct { - Wrapped Spec - Func function.Function -} - -func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { - cb(s.Wrapped) -} - -func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) - if diags.HasErrors() { - // We won't try to run our function in this case, because it'll probably - // generate confusing additional errors that will distract from the - // root cause. - return cty.UnknownVal(s.impliedType()), diags - } - - resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) - if err != nil { - // This is not a good example of a diagnostic because it is reporting - // a programming error in the calling application, rather than something - // an end-user could act on. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Transform function failed", - Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), - Subject: s.sourceRange(content, blockLabels).Ptr(), - }) - return cty.UnknownVal(s.impliedType()), diags - } - - return resultVal, diags -} - -func (s *TransformFuncSpec) impliedType() cty.Type { - wrappedTy := s.Wrapped.impliedType() - resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) - if err != nil { - // Should never happen with a correctly-configured spec - return cty.DynamicPseudoType - } - - return resultTy -} - -func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // We'll just pass through our wrapped range here, even though that's - // not super-accurate, because there's nothing better to return. - return s.Wrapped.sourceRange(content, blockLabels) -} - -// noopSpec is a placeholder spec that does nothing, used in situations where -// a non-nil placeholder spec is required. It is not exported because there is -// no reason to use it directly; it is always an implementation detail only. -type noopSpec struct { -} - -func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - return cty.NullVal(cty.DynamicPseudoType), nil -} - -func (s noopSpec) impliedType() cty.Type { - return cty.DynamicPseudoType -} - -func (s noopSpec) visitSameBodyChildren(cb visitFunc) { - // nothing to do -} - -func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { - // No useful range for a noopSpec, and nobody should be calling this anyway. - return hcl.Range{ - Filename: "noopSpec", - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go deleted file mode 100644 index f8440eb6..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcldec - -import ( - "github.com/hashicorp/hcl/v2" -) - -// Variables processes the given body with the given spec and returns a -// list of the variable traversals that would be required to decode -// the same pairing of body and spec. -// -// This can be used to conditionally populate the variables in the EvalContext -// passed to Decode, for applications where a static scope is insufficient. 
-// -// If the given body is not compliant with the given schema, the result may -// be incomplete, but that's assumed to be okay because the eventual call -// to Decode will produce error diagnostics anyway. -func Variables(body hcl.Body, spec Spec) []hcl.Traversal { - var vars []hcl.Traversal - schema := ImpliedSchema(spec) - content, _, _ := body.PartialContent(schema) - - if vs, ok := spec.(specNeedingVariables); ok { - vars = append(vars, vs.variablesNeeded(content)...) - } - - var visitFn visitFunc - visitFn = func(s Spec) { - if vs, ok := s.(specNeedingVariables); ok { - vars = append(vars, vs.variablesNeeded(content)...) - } - s.visitSameBodyChildren(visitFn) - } - spec.visitSameBodyChildren(visitFn) - - return vars -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go deleted file mode 100644 index 1a801448..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package hcled provides functionality intended to help an application -// that embeds HCL to deliver relevant information to a text editor or IDE -// for navigating around and analyzing configuration files. -package hcled diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go deleted file mode 100644 index 050ad758..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go +++ /dev/null @@ -1,34 +0,0 @@ -package hcled - -import ( - "github.com/hashicorp/hcl/v2" -) - -type contextStringer interface { - ContextString(offset int) string -} - -// ContextString returns a string describing the context of the given byte -// offset, if available. An empty string is returned if no such information -// is available, or otherwise the returned string is in a form that depends -// on the language used to write the referenced file. -func ContextString(file *hcl.File, offset int) string { - if cser, ok := file.Nav.(contextStringer); ok { - return cser.ContextString(offset) - } - return "" -} - -type contextDefRanger interface { - ContextDefRange(offset int) hcl.Range -} - -func ContextDefRange(file *hcl.File, offset int) hcl.Range { - if cser, ok := file.Nav.(contextDefRanger); ok { - defRange := cser.ContextDefRange(offset) - if !defRange.Empty() { - return defRange - } - } - return file.Body.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go deleted file mode 100644 index 1dc2eccd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go +++ /dev/null @@ -1,135 +0,0 @@ -// Package hclparse has the main API entry point for parsing both HCL native -// syntax and HCL JSON. -// -// The main HCL package also includes SimpleParse and SimpleParseFile which -// can be a simpler interface for the common case where an application just -// needs to parse a single file. The gohcl package simplifies that further -// in its SimpleDecode function, which combines hcl.SimpleParse with decoding -// into Go struct values -// -// Package hclparse, then, is useful for applications that require more fine -// control over parsing or which need to load many separate files and keep -// track of them for possible error reporting or other analysis. -package hclparse - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2/json" -) - -// NOTE: This is the public interface for parsing. 
The actual parsers are -// in other packages alongside this one, with this package just wrapping them -// to provide a unified interface for the caller across all supported formats. - -// Parser is the main interface for parsing configuration files. As well as -// parsing files, a parser also retains a registry of all of the files it -// has parsed so that multiple attempts to parse the same file will return -// the same object and so the collected files can be used when printing -// diagnostics. -// -// Any diagnostics for parsing a file are only returned once on the first -// call to parse that file. Callers are expected to collect up diagnostics -// and present them together, so returning diagnostics for the same file -// multiple times would create a confusing result. -type Parser struct { - files map[string]*hcl.File -} - -// NewParser creates a new parser, ready to parse configuration files. -func NewParser() *Parser { - return &Parser{ - files: map[string]*hcl.File{}, - } -} - -// ParseHCL parses the given buffer (which is assumed to have been loaded from -// the given filename) as a native-syntax configuration file and returns the -// hcl.File object representing it. -func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) - p.files[filename] = file - return file, diags -} - -// ParseHCLFile reads the given filename and parses it as a native-syntax HCL -// configuration file. An error diagnostic is returned if the given file -// cannot be read. -func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - src, err := ioutil.ReadFile(filename) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), - }, - } - } - - return p.ParseHCL(src, filename) -} - -// ParseJSON parses the given JSON buffer (which is assumed to have been loaded -// from the given filename) and returns the hcl.File object representing it. -func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.Parse(src, filename) - p.files[filename] = file - return file, diags -} - -// ParseJSONFile reads the given filename and parses it as JSON, similarly to -// ParseJSON. An error diagnostic is returned if the given file cannot be read. -func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { - if existing := p.files[filename]; existing != nil { - return existing, nil - } - - file, diags := json.ParseFile(filename) - p.files[filename] = file - return file, diags -} - -// AddFile allows a caller to record in a parser a file that was parsed some -// other way, thus allowing it to be included in the registry of sources. -func (p *Parser) AddFile(filename string, file *hcl.File) { - p.files[filename] = file -} - -// Sources returns a map from filenames to the raw source code that was -// read from them. This is intended to be used, for example, to print -// diagnostics with contextual information. -// -// The arrays underlying the returned slices should not be modified. 
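// Annotation (not part of the deleted vendor file): a minimal sketch of the hclparse.Parser
// workflow described above — parse files through one shared parser, then reuse its file
// registry when printing diagnostics with source context. The "main.tf" file name is an
// illustrative assumption, and hcl.NewDiagnosticTextWriter is part of the hcl package
// rather than of this deleted file.
package main

import (
	"log"
	"os"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	parser := hclparse.NewParser()

	// The parser caches files by name, so repeated parses of the same path
	// return the same *hcl.File and report diagnostics only once.
	_, diags := parser.ParseHCLFile("main.tf")

	// Files() feeds the diagnostic writer so errors are rendered with the
	// relevant source lines alongside the message.
	wr := hcl.NewDiagnosticTextWriter(os.Stdout, parser.Files(), 80, true)
	if err := wr.WriteDiagnostics(diags); err != nil {
		log.Fatal(err)
	}
}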
-func (p *Parser) Sources() map[string][]byte { - ret := make(map[string][]byte) - for fn, f := range p.files { - ret[fn] = f.Bytes - } - return ret -} - -// Files returns a map from filenames to the File objects produced from them. -// This is intended to be used, for example, to print diagnostics with -// contextual information. -// -// The returned map and all of the objects it refers to directly or indirectly -// must not be modified. -func (p *Parser) Files() map[string]*hcl.File { - return p.files -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go deleted file mode 100644 index 09041652..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go +++ /dev/null @@ -1,121 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" -) - -type File struct { - inTree - - srcBytes []byte - body *node -} - -// NewEmptyFile constructs a new file with no content, ready to be mutated -// by other calls that append to its body. -func NewEmptyFile() *File { - f := &File{ - inTree: newInTree(), - } - body := newBody() - f.body = f.children.Append(body) - return f -} - -// Body returns the root body of the file, which contains the top-level -// attributes and blocks. -func (f *File) Body() *Body { - return f.body.content.(*Body) -} - -// WriteTo writes the tokens underlying the receiving file to the given writer. -// -// The tokens first have a simple formatting pass applied that adjusts only -// the spaces between them. -func (f *File) WriteTo(wr io.Writer) (int64, error) { - tokens := f.inTree.children.BuildTokens(nil) - format(tokens) - return tokens.WriteTo(wr) -} - -// Bytes returns a buffer containing the source code resulting from the -// tokens underlying the receiving file. If any updates have been made via -// the AST API, these will be reflected in the result. 
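For context on what the vendored hclparse package removed above provided, here is a minimal usage sketch, assuming the upstream `github.com/hashicorp/hcl/v2` module paths; `NewParser` and `ParseHCL` carry the signatures shown in the deleted source, while the sample filename and source text are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	src := []byte("name = \"example\"\n")

	// NewParser and ParseHCL match the signatures in the removed vendored
	// source; the parser remembers every file it has parsed.
	p := hclparse.NewParser()
	file, diags := p.ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		log.Fatalf("parse failed: %s", diags.Error())
	}

	// Files() exposes the registry of parsed files, which callers typically
	// hand to a diagnostic writer so errors can show source context.
	fmt.Println(len(p.Files()), file != nil)
}
```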
-func (f *File) Bytes() []byte { - buf := &bytes.Buffer{} - f.WriteTo(buf) - return buf.Bytes() -} - -type comments struct { - leafNode - - parent *node - tokens Tokens -} - -func newComments(tokens Tokens) *comments { - return &comments{ - tokens: tokens, - } -} - -func (c *comments) BuildTokens(to Tokens) Tokens { - return c.tokens.BuildTokens(to) -} - -type identifier struct { - leafNode - - parent *node - token *Token -} - -func newIdentifier(token *Token) *identifier { - return &identifier{ - token: token, - } -} - -func (i *identifier) BuildTokens(to Tokens) Tokens { - return append(to, i.token) -} - -func (i *identifier) hasName(name string) bool { - return name == string(i.token.Bytes) -} - -type number struct { - leafNode - - parent *node - token *Token -} - -func newNumber(token *Token) *number { - return &number{ - token: token, - } -} - -func (n *number) BuildTokens(to Tokens) Tokens { - return append(to, n.token) -} - -type quoted struct { - leafNode - - parent *node - tokens Tokens -} - -func newQuoted(tokens Tokens) *quoted { - return "ed{ - tokens: tokens, - } -} - -func (q *quoted) BuildTokens(to Tokens) Tokens { - return q.tokens.BuildTokens(to) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go deleted file mode 100644 index 609419ff..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go +++ /dev/null @@ -1,48 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -type Attribute struct { - inTree - - leadComments *node - name *node - expr *node - lineComments *node -} - -func newAttribute() *Attribute { - return &Attribute{ - inTree: newInTree(), - } -} - -func (a *Attribute) init(name string, expr *Expression) { - expr.assertUnattached() - - nameTok := newIdentToken(name) - nameObj := newIdentifier(nameTok) - a.leadComments = a.children.Append(newComments(nil)) - a.name = a.children.Append(nameObj) - a.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenEqual, - Bytes: []byte{'='}, - }, - }) - a.expr = a.children.Append(expr) - a.expr.list = a.children - a.lineComments = a.children.Append(newComments(nil)) - a.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) -} - -func (a *Attribute) Expr() *Expression { - return a.expr.content.(*Expression) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go deleted file mode 100644 index f7d3921b..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go +++ /dev/null @@ -1,118 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -type Block struct { - inTree - - leadComments *node - typeName *node - labels nodeSet - open *node - body *node - close *node -} - -func newBlock() *Block { - return &Block{ - inTree: newInTree(), - labels: newNodeSet(), - } -} - -// NewBlock constructs a new, empty block with the given type name and labels. 
-func NewBlock(typeName string, labels []string) *Block { - block := newBlock() - block.init(typeName, labels) - return block -} - -func (b *Block) init(typeName string, labels []string) { - nameTok := newIdentToken(typeName) - nameObj := newIdentifier(nameTok) - b.leadComments = b.children.Append(newComments(nil)) - b.typeName = b.children.Append(nameObj) - for _, label := range labels { - labelToks := TokensForValue(cty.StringVal(label)) - labelObj := newQuoted(labelToks) - labelNode := b.children.Append(labelObj) - b.labels.Add(labelNode) - } - b.open = b.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenOBrace, - Bytes: []byte{'{'}, - }, - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) - body := newBody() // initially totally empty; caller can append to it subsequently - b.body = b.children.Append(body) - b.close = b.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenCBrace, - Bytes: []byte{'}'}, - }, - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) -} - -// Body returns the body that represents the content of the receiving block. -// -// Appending to or otherwise modifying this body will make changes to the -// tokens that are generated between the blocks open and close braces. -func (b *Block) Body() *Body { - return b.body.content.(*Body) -} - -// Type returns the type name of the block. -func (b *Block) Type() string { - typeNameObj := b.typeName.content.(*identifier) - return string(typeNameObj.token.Bytes) -} - -// Labels returns the labels of the block. -func (b *Block) Labels() []string { - labelNames := make([]string, 0, len(b.labels)) - list := b.labels.List() - - for _, label := range list { - switch labelObj := label.content.(type) { - case *identifier: - if labelObj.token.Type == hclsyntax.TokenIdent { - labelString := string(labelObj.token.Bytes) - labelNames = append(labelNames, labelString) - } - - case *quoted: - tokens := labelObj.tokens - if len(tokens) == 3 && - tokens[0].Type == hclsyntax.TokenOQuote && - tokens[1].Type == hclsyntax.TokenQuotedLit && - tokens[2].Type == hclsyntax.TokenCQuote { - // Note that TokenQuotedLit may contain escape sequences. - labelString, diags := hclsyntax.ParseStringLiteralToken(tokens[1].asHCLSyntax()) - - // If parsing the string literal returns error diagnostics - // then we can just assume the label doesn't match, because it's invalid in some way. - if !diags.HasErrors() { - labelNames = append(labelNames, labelString) - } - } - - default: - // If neither of the previous cases are true (should be impossible) - // then we can just ignore it, because it's invalid too. 
- } - } - - return labelNames -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go deleted file mode 100644 index 119f53e6..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go +++ /dev/null @@ -1,239 +0,0 @@ -package hclwrite - -import ( - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -type Body struct { - inTree - - items nodeSet -} - -func newBody() *Body { - return &Body{ - inTree: newInTree(), - items: newNodeSet(), - } -} - -func (b *Body) appendItem(c nodeContent) *node { - nn := b.children.Append(c) - b.items.Add(nn) - return nn -} - -func (b *Body) appendItemNode(nn *node) *node { - nn.assertUnattached() - b.children.AppendNode(nn) - b.items.Add(nn) - return nn -} - -// Clear removes all of the items from the body, making it empty. -func (b *Body) Clear() { - b.children.Clear() -} - -func (b *Body) AppendUnstructuredTokens(ts Tokens) { - b.inTree.children.Append(ts) -} - -// Attributes returns a new map of all of the attributes in the body, with -// the attribute names as the keys. -func (b *Body) Attributes() map[string]*Attribute { - ret := make(map[string]*Attribute) - for n := range b.items { - if attr, isAttr := n.content.(*Attribute); isAttr { - nameObj := attr.name.content.(*identifier) - name := string(nameObj.token.Bytes) - ret[name] = attr - } - } - return ret -} - -// Blocks returns a new slice of all the blocks in the body. -func (b *Body) Blocks() []*Block { - ret := make([]*Block, 0, len(b.items)) - for _, n := range b.items.List() { - if block, isBlock := n.content.(*Block); isBlock { - ret = append(ret, block) - } - } - return ret -} - -// GetAttribute returns the attribute from the body that has the given name, -// or returns nil if there is currently no matching attribute. -func (b *Body) GetAttribute(name string) *Attribute { - for n := range b.items { - if attr, isAttr := n.content.(*Attribute); isAttr { - nameObj := attr.name.content.(*identifier) - if nameObj.hasName(name) { - // We've found it! - return attr - } - } - } - - return nil -} - -// getAttributeNode is like GetAttribute but it returns the node containing -// the selected attribute (if one is found) rather than the attribute itself. -func (b *Body) getAttributeNode(name string) *node { - for n := range b.items { - if attr, isAttr := n.content.(*Attribute); isAttr { - nameObj := attr.name.content.(*identifier) - if nameObj.hasName(name) { - // We've found it! - return n - } - } - } - - return nil -} - -// FirstMatchingBlock returns a first matching block from the body that has the -// given name and labels or returns nil if there is currently no matching -// block. -func (b *Body) FirstMatchingBlock(typeName string, labels []string) *Block { - for _, block := range b.Blocks() { - if typeName == block.Type() { - labelNames := block.Labels() - if len(labels) == 0 && len(labelNames) == 0 { - return block - } - if reflect.DeepEqual(labels, labelNames) { - return block - } - } - } - - return nil -} - -// RemoveBlock removes the given block from the body, if it's in that body. -// If it isn't present, this is a no-op. -// -// Returns true if it removed something, or false otherwise. 
-func (b *Body) RemoveBlock(block *Block) bool { - for n := range b.items { - if n.content == block { - n.Detach() - b.items.Remove(n) - return true - } - } - return false -} - -// SetAttributeRaw either replaces the expression of an existing attribute -// of the given name or adds a new attribute definition to the end of the block, -// using the given tokens verbatim as the expression. -// -// The same caveats apply to this function as for NewExpressionRaw on which -// it is based. If possible, prefer to use SetAttributeValue or -// SetAttributeTraversal. -func (b *Body) SetAttributeRaw(name string, tokens Tokens) *Attribute { - attr := b.GetAttribute(name) - expr := NewExpressionRaw(tokens) - if attr != nil { - attr.expr = attr.expr.ReplaceWith(expr) - } else { - attr := newAttribute() - attr.init(name, expr) - b.appendItem(attr) - } - return attr -} - -// SetAttributeValue either replaces the expression of an existing attribute -// of the given name or adds a new attribute definition to the end of the block. -// -// The value is given as a cty.Value, and must therefore be a literal. To set -// a variable reference or other traversal, use SetAttributeTraversal. -// -// The return value is the attribute that was either modified in-place or -// created. -func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute { - attr := b.GetAttribute(name) - expr := NewExpressionLiteral(val) - if attr != nil { - attr.expr = attr.expr.ReplaceWith(expr) - } else { - attr := newAttribute() - attr.init(name, expr) - b.appendItem(attr) - } - return attr -} - -// SetAttributeTraversal either replaces the expression of an existing attribute -// of the given name or adds a new attribute definition to the end of the body. -// -// The new expression is given as a hcl.Traversal, which must be an absolute -// traversal. To set a literal value, use SetAttributeValue. -// -// The return value is the attribute that was either modified in-place or -// created. -func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute { - attr := b.GetAttribute(name) - expr := NewExpressionAbsTraversal(traversal) - if attr != nil { - attr.expr = attr.expr.ReplaceWith(expr) - } else { - attr := newAttribute() - attr.init(name, expr) - b.appendItem(attr) - } - return attr -} - -// RemoveAttribute removes the attribute with the given name from the body. -// -// The return value is the attribute that was removed, or nil if there was -// no such attribute (in which case the call was a no-op). -func (b *Body) RemoveAttribute(name string) *Attribute { - node := b.getAttributeNode(name) - if node == nil { - return nil - } - node.Detach() - b.items.Remove(node) - return node.content.(*Attribute) -} - -// AppendBlock appends an existing block (which must not be already attached -// to a body) to the end of the receiving body. -func (b *Body) AppendBlock(block *Block) *Block { - b.appendItem(block) - return block -} - -// AppendNewBlock appends a new nested block to the end of the receiving body -// with the given type name and labels. -func (b *Body) AppendNewBlock(typeName string, labels []string) *Block { - block := newBlock() - block.init(typeName, labels) - b.appendItem(block) - return block -} - -// AppendNewline appends a newline token to th end of the receiving body, -// which generally serves as a separator between different sets of body -// contents. 
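The Body helpers deleted above (SetAttributeValue, AppendNewBlock, and friends) are the core of hclwrite's generation API. A minimal sketch of how they fit together, assuming the upstream hclwrite and go-cty imports; the block and attribute names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// NewEmptyFile, Body, SetAttributeValue, AppendNewBlock and Bytes all
	// appear in the vendored hclwrite sources being removed here.
	f := hclwrite.NewEmptyFile()
	root := f.Body()

	root.SetAttributeValue("environment", cty.StringVal("nonprod"))

	svc := root.AppendNewBlock("service", []string{"web"})
	svc.Body().SetAttributeValue("replicas", cty.NumberIntVal(2))
	svc.Body().SetAttributeValue("enabled", cty.True)

	// Bytes runs the canonical formatting pass before rendering.
	fmt.Printf("%s", f.Bytes())
}
```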
-func (b *Body) AppendNewline() { - b.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenNewline, - Bytes: []byte{'\n'}, - }, - }) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go deleted file mode 100644 index 073c3087..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go +++ /dev/null @@ -1,224 +0,0 @@ -package hclwrite - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -type Expression struct { - inTree - - absTraversals nodeSet -} - -func newExpression() *Expression { - return &Expression{ - inTree: newInTree(), - absTraversals: newNodeSet(), - } -} - -// NewExpressionRaw constructs an expression containing the given raw tokens. -// -// There is no automatic validation that the given tokens produce a valid -// expression. Callers of thus function must take care to produce invalid -// expression tokens. Where possible, use the higher-level functions -// NewExpressionLiteral or NewExpressionAbsTraversal instead. -// -// Because NewExpressionRaw does not interpret the given tokens in any way, -// an expression created by NewExpressionRaw will produce an empty result -// for calls to its method Variables, even if the given token sequence -// contains a subslice that would normally be interpreted as a traversal under -// parsing. -func NewExpressionRaw(tokens Tokens) *Expression { - expr := newExpression() - // We copy the tokens here in order to make sure that later mutations - // by the caller don't inadvertently cause our expression to become - // invalid. - copyTokens := make(Tokens, len(tokens)) - copy(copyTokens, tokens) - expr.children.AppendUnstructuredTokens(copyTokens) - return expr -} - -// NewExpressionLiteral constructs an an expression that represents the given -// literal value. -// -// Since an unknown value cannot be represented in source code, this function -// will panic if the given value is unknown or contains a nested unknown value. -// Use val.IsWhollyKnown before calling to be sure. -// -// HCL native syntax does not directly represent lists, maps, and sets, and -// instead relies on the automatic conversions to those collection types from -// either list or tuple constructor syntax. Therefore converting collection -// values to source code and re-reading them will lose type information, and -// the reader must provide a suitable type at decode time to recover the -// original value. -func NewExpressionLiteral(val cty.Value) *Expression { - toks := TokensForValue(val) - expr := newExpression() - expr.children.AppendUnstructuredTokens(toks) - return expr -} - -// NewExpressionAbsTraversal constructs an expression that represents the -// given traversal, which must be absolute or this function will panic. 
-func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression { - if traversal.IsRelative() { - panic("can't construct expression from relative traversal") - } - - physT := newTraversal() - rootName := traversal.RootName() - steps := traversal[1:] - - { - tn := newTraverseName() - tn.name = tn.children.Append(newIdentifier(&Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(rootName), - })) - physT.steps.Add(physT.children.Append(tn)) - } - - for _, step := range steps { - switch ts := step.(type) { - case hcl.TraverseAttr: - tn := newTraverseName() - tn.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenDot, - Bytes: []byte{'.'}, - }, - }) - tn.name = tn.children.Append(newIdentifier(&Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - })) - physT.steps.Add(physT.children.Append(tn)) - case hcl.TraverseIndex: - ti := newTraverseIndex() - ti.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }, - }) - indexExpr := NewExpressionLiteral(ts.Key) - ti.key = ti.children.Append(indexExpr) - ti.children.AppendUnstructuredTokens(Tokens{ - { - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }, - }) - physT.steps.Add(physT.children.Append(ti)) - } - } - - expr := newExpression() - expr.absTraversals.Add(expr.children.Append(physT)) - return expr -} - -// Variables returns the absolute traversals that exist within the receiving -// expression. -func (e *Expression) Variables() []*Traversal { - nodes := e.absTraversals.List() - ret := make([]*Traversal, len(nodes)) - for i, node := range nodes { - ret[i] = node.content.(*Traversal) - } - return ret -} - -// RenameVariablePrefix examines each of the absolute traversals in the -// receiving expression to see if they have the given sequence of names as -// a prefix prefix. If so, they are updated in place to have the given -// replacement names instead of that prefix. -// -// This can be used to implement symbol renaming. The calling application can -// visit all relevant expressions in its input and apply the same renaming -// to implement a global symbol rename. -// -// The search and replacement traversals must be the same length, or this -// method will panic. Only attribute access operations can be matched and -// replaced. Index steps never match the prefix. -func (e *Expression) RenameVariablePrefix(search, replacement []string) { - if len(search) != len(replacement) { - panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement))) - } -Traversals: - for node := range e.absTraversals { - traversal := node.content.(*Traversal) - if len(traversal.steps) < len(search) { - // If it's shorter then it can't have our prefix - continue - } - - stepNodes := traversal.steps.List() - for i, name := range search { - step, isName := stepNodes[i].content.(*TraverseName) - if !isName { - continue Traversals // only name nodes can match - } - foundNameBytes := step.name.content.(*identifier).token.Bytes - if len(foundNameBytes) != len(name) { - continue Traversals - } - if string(foundNameBytes) != name { - continue Traversals - } - } - - // If we get here then the prefix matched, so now we'll swap in - // the replacement strings. - for i, name := range replacement { - step := stepNodes[i].content.(*TraverseName) - token := step.name.content.(*identifier).token - token.Bytes = []byte(name) - } - } -} - -// Traversal represents a sequence of variable, attribute, and/or index -// operations. 
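SetAttributeTraversal and NewExpressionAbsTraversal (above) cover the case where an attribute should reference another symbol rather than hold a literal. A short sketch contrasting the two, again assuming the upstream hcl/v2 and go-cty imports; `var.region` is an arbitrary example reference:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	body := f.Body()

	// A literal value versus an absolute traversal (rendered as var.region).
	body.SetAttributeValue("count", cty.NumberIntVal(2))
	body.SetAttributeTraversal("region", hcl.Traversal{
		hcl.TraverseRoot{Name: "var"},
		hcl.TraverseAttr{Name: "region"},
	})

	fmt.Printf("%s", f.Bytes())
}
```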
-type Traversal struct { - inTree - - steps nodeSet -} - -func newTraversal() *Traversal { - return &Traversal{ - inTree: newInTree(), - steps: newNodeSet(), - } -} - -type TraverseName struct { - inTree - - name *node -} - -func newTraverseName() *TraverseName { - return &TraverseName{ - inTree: newInTree(), - } -} - -type TraverseIndex struct { - inTree - - key *node -} - -func newTraverseIndex() *TraverseIndex { - return &TraverseIndex{ - inTree: newInTree(), - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go deleted file mode 100644 index 56d5b775..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hclwrite deals with the problem of generating HCL configuration -// and of making specific surgical changes to existing HCL configurations. -// -// It operates at a different level of abstraction than the main HCL parser -// and AST, since details such as the placement of comments and newlines -// are preserved when unchanged. -// -// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes -// to be read out, created and inserted, etc. Nodes represent syntax constructs -// rather than semantic concepts. -package hclwrite diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go deleted file mode 100644 index b94bee38..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go +++ /dev/null @@ -1,463 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'}) - -// placeholder token used when we don't have a token but we don't want -// to pass a real "nil" and complicate things with nil pointer checks -var nilToken = &Token{ - Type: hclsyntax.TokenNil, - Bytes: []byte{}, - SpacesBefore: 0, -} - -// format rewrites tokens within the given sequence, in-place, to adjust the -// whitespace around their content to achieve canonical formatting. -func format(tokens Tokens) { - // Formatting is a multi-pass process. More details on the passes below, - // but this is the overview: - // - adjust the leading space on each line to create appropriate - // indentation - // - adjust spaces between tokens in a single cell using a set of rules - // - adjust the leading space in the "assign" and "comment" cells on each - // line to vertically align with neighboring lines. - // All of these steps operate in-place on the given tokens, so a caller - // may collect a flat sequence of all of the tokens underlying an AST - // and pass it here and we will then indirectly modify the AST itself. - // Formatting must change only whitespace. Specifically, that means - // changing the SpacesBefore attribute on a token while leaving the - // other token attributes unchanged. - - lines := linesForFormat(tokens) - formatIndent(lines) - formatSpaces(lines) - formatCells(lines) -} - -func formatIndent(lines []formatLine) { - // Our methodology for indents is to take the input one line at a time - // and count the bracketing delimiters on each line. If a line has a net - // increase in open brackets, we increase the indent level by one and - // remember how many new openers we had. If the line has a net _decrease_, - // we'll compare it to the most recent number of openers and decrease the - // dedent level by one each time we pass an indent level remembered - // earlier. 
- // The "indent stack" used here allows for us to recognize degenerate - // input where brackets are not symmetrical within lines and avoid - // pushing things too far left or right, creating confusion. - - // We'll start our indent stack at a reasonable capacity to minimize the - // chance of us needing to grow it; 10 here means 10 levels of indent, - // which should be more than enough for reasonable HCL uses. - indents := make([]int, 0, 10) - - for i := range lines { - line := &lines[i] - if len(line.lead) == 0 { - continue - } - - if line.lead[0].Type == hclsyntax.TokenNewline { - // Never place spaces before a newline - line.lead[0].SpacesBefore = 0 - continue - } - - netBrackets := 0 - for _, token := range line.lead { - netBrackets += tokenBracketChange(token) - if token.Type == hclsyntax.TokenOHeredoc { - break - } - } - - for _, token := range line.assign { - netBrackets += tokenBracketChange(token) - } - - switch { - case netBrackets > 0: - line.lead[0].SpacesBefore = 2 * len(indents) - indents = append(indents, netBrackets) - case netBrackets < 0: - closed := -netBrackets - for closed > 0 && len(indents) > 0 { - switch { - - case closed > indents[len(indents)-1]: - closed -= indents[len(indents)-1] - indents = indents[:len(indents)-1] - - case closed < indents[len(indents)-1]: - indents[len(indents)-1] -= closed - closed = 0 - - default: - indents = indents[:len(indents)-1] - closed = 0 - } - } - line.lead[0].SpacesBefore = 2 * len(indents) - default: - line.lead[0].SpacesBefore = 2 * len(indents) - } - } -} - -func formatSpaces(lines []formatLine) { - for _, line := range lines { - for i, token := range line.lead { - var before, after *Token - if i > 0 { - before = line.lead[i-1] - } else { - before = nilToken - } - if i < (len(line.lead) - 1) { - after = line.lead[i+1] - } else { - after = nilToken - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - for i, token := range line.assign { - if i == 0 { - // first token in "assign" always has one space before to - // separate the equals sign from what it's assigning. - token.SpacesBefore = 1 - } - - var before, after *Token - if i > 0 { - before = line.assign[i-1] - } else { - before = nilToken - } - if i < (len(line.assign) - 1) { - after = line.assign[i+1] - } else { - after = nilToken - } - if spaceAfterToken(token, before, after) { - after.SpacesBefore = 1 - } else { - after.SpacesBefore = 0 - } - } - - } -} - -func formatCells(lines []formatLine) { - - chainStart := -1 - maxColumns := 0 - - // We'll deal with the "assign" cell first, since moving that will - // also impact the "comment" cell. 
- closeAssignChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.assign[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.assign == nil { - if chainStart != -1 { - closeAssignChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeAssignChain(len(lines)) - } - - // Now we'll deal with the comments - closeCommentChain := func(i int) { - for _, chainLine := range lines[chainStart:i] { - columns := chainLine.lead.Columns() + chainLine.assign.Columns() - spaces := (maxColumns - columns) + 1 - chainLine.comment[0].SpacesBefore = spaces - } - chainStart = -1 - maxColumns = 0 - } - for i, line := range lines { - if line.comment == nil { - if chainStart != -1 { - closeCommentChain(i) - } - } else { - if chainStart == -1 { - chainStart = i - } - columns := line.lead.Columns() + line.assign.Columns() - if columns > maxColumns { - maxColumns = columns - } - } - } - if chainStart != -1 { - closeCommentChain(len(lines)) - } - -} - -// spaceAfterToken decides whether a particular subject token should have a -// space after it when surrounded by the given before and after tokens. -// "before" can be TokenNil, if the subject token is at the start of a sequence. -func spaceAfterToken(subject, before, after *Token) bool { - switch { - - case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: - // Never add spaces before a newline - return false - - case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: - // Don't split a function name from open paren in a call - return false - - case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: - // Don't use spaces around attribute access dots - return false - - case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis: - // No space right before a comma or ... in an argument list - return false - - case subject.Type == hclsyntax.TokenComma: - // Always a space after a comma - return true - - case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: - // No extra spaces within templates - return false - - case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: - // This is a special case for inside for expressions where a user - // might want to use a literal tuple constructor: - // [for x in [foo]: x] - // ... in that case, we would normally produce in[foo] thinking that - // in is a reference, but we'll recognize it as a keyword here instead - // to make the result less confusing. - return true - - case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): - return false - - case subject.Type == hclsyntax.TokenMinus: - // Since a minus can either be subtraction or negation, and the latter - // should _not_ have a space after it, we need to use some heuristics - // to decide which case this is. 
- // We guess that we have a negation if the token before doesn't look - // like it could be the end of an expression. - - switch before.Type { - - case hclsyntax.TokenNil: - // Minus at the start of input must be a negation - return false - - case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: - // Minus immediately after an opening bracket or separator must be a negation. - return false - - case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: - // Minus immediately after another arithmetic operator must be negation. - return false - - case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: - // Minus immediately after another comparison operator must be negation. - return false - - case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: - // Minus immediately after logical operator doesn't make sense but probably intended as negation. - return false - - default: - return true - } - - case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: - // Unlike other bracket types, braces have spaces on both sides of them, - // both in single-line nested blocks foo { bar = baz } and in object - // constructor expressions foo = { bar = baz }. - if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { - // An open brace followed by a close brace is an exception, however. - // e.g. foo {} rather than foo { } - return false - } - return true - - // In the unlikely event that an interpolation expression is just - // a single object constructor, we'll put a space between the ${ and - // the following { to make this more obvious, and then the same - // thing for the two braces at the end. - case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: - return true - case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: - return true - - // Don't add spaces between interpolated items - case subject.Type == hclsyntax.TokenTemplateSeqEnd && (after.Type == hclsyntax.TokenTemplateInterp || after.Type == hclsyntax.TokenTemplateControl): - return false - - case tokenBracketChange(subject) > 0: - // No spaces after open brackets - return false - - case tokenBracketChange(after) < 0: - // No spaces before close brackets - return false - - default: - // Most tokens are space-separated - return true - - } -} - -func linesForFormat(tokens Tokens) []formatLine { - if len(tokens) == 0 { - return make([]formatLine, 0) - } - - // first we'll count our lines, so we can allocate the array for them in - // a single block. (We want to minimize memory pressure in this codepath, - // so it can be run somewhat-frequently by editor integrations.) - lineCount := 1 // if there are zero newlines then there is one line - for _, tok := range tokens { - if tokenIsNewline(tok) { - lineCount++ - } - } - - // To start, we'll just put everything in the "lead" cell on each line, - // and then do another pass over the lines afterwards to adjust. - lines := make([]formatLine, lineCount) - li := 0 - lineStart := 0 - for i, tok := range tokens { - if tok.Type == hclsyntax.TokenEOF { - // The EOF token doesn't belong to any line, and terminates the - // token sequence. 
- lines[li].lead = tokens[lineStart:i] - break - } - - if tokenIsNewline(tok) { - lines[li].lead = tokens[lineStart : i+1] - lineStart = i + 1 - li++ - } - } - - // If a set of tokens doesn't end in TokenEOF (e.g. because it's a - // fragment of tokens from the middle of a file) then we might fall - // out here with a line still pending. - if lineStart < len(tokens) { - lines[li].lead = tokens[lineStart:] - if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF { - lines[li].lead = lines[li].lead[:len(lines[li].lead)-1] - } - } - - // Now we'll pick off any trailing comments and attribute assignments - // to shuffle off into the "comment" and "assign" cells. - for i := range lines { - line := &lines[i] - - if len(line.lead) == 0 { - // if the line is empty then there's nothing for us to do - // (this should happen only for the final line, because all other - // lines would have a newline token of some kind) - continue - } - - if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { - line.comment = line.lead[len(line.lead)-1:] - line.lead = line.lead[:len(line.lead)-1] - } - - for i, tok := range line.lead { - if i > 0 && tok.Type == hclsyntax.TokenEqual { - // We only move the tokens into "assign" if the RHS seems to - // be a whole expression, which we determine by counting - // brackets. If there's a net positive number of brackets - // then that suggests we're introducing a multi-line expression. - netBrackets := 0 - for _, token := range line.lead[i:] { - netBrackets += tokenBracketChange(token) - } - - if netBrackets == 0 { - line.assign = line.lead[i:] - line.lead = line.lead[:i] - } - break - } - } - } - - return lines -} - -func tokenIsNewline(tok *Token) bool { - if tok.Type == hclsyntax.TokenNewline { - return true - } else if tok.Type == hclsyntax.TokenComment { - // Single line tokens (# and //) consume their terminating newline, - // so we need to treat them as newline tokens as well. - if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { - return true - } - } - return false -} - -func tokenBracketChange(tok *Token) int { - switch tok.Type { - case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp: - return 1 - case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd: - return -1 - default: - return 0 - } -} - -// formatLine represents a single line of source code for formatting purposes, -// splitting its tokens into up to three "cells": -// -// lead: always present, representing everything up to one of the others -// assign: if line contains an attribute assignment, represents the tokens -// starting at (and including) the equals symbol -// comment: if line contains any non-comment tokens and ends with a -// single-line comment token, represents the comment. -// -// When formatting, the leading spaces of the first tokens in each of these -// cells is adjusted to align vertically their occurences on consecutive -// rows. 
-type formatLine struct { - lead Tokens - assign Tokens - comment Tokens -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go deleted file mode 100644 index 4d439acd..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go +++ /dev/null @@ -1,252 +0,0 @@ -package hclwrite - -import ( - "fmt" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// TokensForValue returns a sequence of tokens that represents the given -// constant value. -// -// This function only supports types that are used by HCL. In particular, it -// does not support capsule types and will panic if given one. -// -// It is not possible to express an unknown value in source code, so this -// function will panic if the given value is unknown or contains any unknown -// values. A caller can call the value's IsWhollyKnown method to verify that -// no unknown values are present before calling TokensForValue. -func TokensForValue(val cty.Value) Tokens { - toks := appendTokensForValue(val, nil) - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -// TokensForTraversal returns a sequence of tokens that represents the given -// traversal. -// -// If the traversal is absolute then the result is a self-contained, valid -// reference expression. If the traversal is relative then the returned tokens -// could be appended to some other expression tokens to traverse into the -// represented expression. -func TokensForTraversal(traversal hcl.Traversal) Tokens { - toks := appendTokensForTraversal(traversal, nil) - format(toks) // fiddle with the SpacesBefore field to get canonical spacing - return toks -} - -func appendTokensForValue(val cty.Value, toks Tokens) Tokens { - switch { - - case !val.IsKnown(): - panic("cannot produce tokens for unknown value") - - case val.IsNull(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(`null`), - }) - - case val.Type() == cty.Bool: - var src []byte - if val.True() { - src = []byte(`true`) - } else { - src = []byte(`false`) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: src, - }) - - case val.Type() == cty.Number: - bf := val.AsBigFloat() - srcStr := bf.Text('f', -1) - toks = append(toks, &Token{ - Type: hclsyntax.TokenNumberLit, - Bytes: []byte(srcStr), - }) - - case val.Type() == cty.String: - // TODO: If it's a multi-line string ending in a newline, format - // it as a HEREDOC instead. 
- src := escapeQuotedStringLit(val.AsString()) - toks = append(toks, &Token{ - Type: hclsyntax.TokenOQuote, - Bytes: []byte{'"'}, - }) - if len(src) > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenQuotedLit, - Bytes: src, - }) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenCQuote, - Bytes: []byte{'"'}, - }) - - case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - - i := 0 - for it := val.ElementIterator(); it.Next(); { - if i > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - _, eVal := it.Element() - toks = appendTokensForValue(eVal, toks) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - - case val.Type().IsMapType() || val.Type().IsObjectType(): - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrace, - Bytes: []byte{'{'}, - }) - - i := 0 - for it := val.ElementIterator(); it.Next(); { - if i > 0 { - toks = append(toks, &Token{ - Type: hclsyntax.TokenComma, - Bytes: []byte{','}, - }) - } - eKey, eVal := it.Element() - if hclsyntax.ValidIdentifier(eKey.AsString()) { - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(eKey.AsString()), - }) - } else { - toks = appendTokensForValue(eKey, toks) - } - toks = append(toks, &Token{ - Type: hclsyntax.TokenEqual, - Bytes: []byte{'='}, - }) - toks = appendTokensForValue(eVal, toks) - i++ - } - - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrace, - Bytes: []byte{'}'}, - }) - - default: - panic(fmt.Sprintf("cannot produce tokens for %#v", val)) - } - - return toks -} - -func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens { - for _, step := range traversal { - toks = appendTokensForTraversalStep(step, toks) - } - return toks -} - -func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens { - switch ts := step.(type) { - case hcl.TraverseRoot: - toks = append(toks, &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }) - case hcl.TraverseAttr: - toks = append( - toks, - &Token{ - Type: hclsyntax.TokenDot, - Bytes: []byte{'.'}, - }, - &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(ts.Name), - }, - ) - case hcl.TraverseIndex: - toks = append(toks, &Token{ - Type: hclsyntax.TokenOBrack, - Bytes: []byte{'['}, - }) - toks = appendTokensForValue(ts.Key, toks) - toks = append(toks, &Token{ - Type: hclsyntax.TokenCBrack, - Bytes: []byte{']'}, - }) - default: - panic(fmt.Sprintf("unsupported traversal step type %T", step)) - } - - return toks -} - -func escapeQuotedStringLit(s string) []byte { - if len(s) == 0 { - return nil - } - buf := make([]byte, 0, len(s)) - for i, r := range s { - switch r { - case '\n': - buf = append(buf, '\\', 'n') - case '\r': - buf = append(buf, '\\', 'r') - case '\t': - buf = append(buf, '\\', 't') - case '"': - buf = append(buf, '\\', '"') - case '\\': - buf = append(buf, '\\', '\\') - case '$', '%': - buf = appendRune(buf, r) - remain := s[i+1:] - if len(remain) > 0 && remain[0] == '{' { - // Double up our template introducer symbol to escape it. - buf = appendRune(buf, r) - } - default: - if !unicode.IsPrint(r) { - var fmted string - if r < 65536 { - fmted = fmt.Sprintf("\\u%04x", r) - } else { - fmted = fmt.Sprintf("\\U%08x", r) - } - buf = append(buf, fmted...) 
- } else { - buf = appendRune(buf, r) - } - } - } - return buf -} - -func appendRune(b []byte, r rune) []byte { - l := utf8.RuneLen(r) - for i := 0; i < l; i++ { - b = append(b, 0) // make room at the end of our buffer - } - ch := b[len(b)-l:] - utf8.EncodeRune(ch, r) - return b -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go deleted file mode 100644 index cedf6862..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go +++ /dev/null @@ -1,23 +0,0 @@ -package hclwrite - -import ( - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -type nativeNodeSorter struct { - Nodes []hclsyntax.Node -} - -func (s nativeNodeSorter) Len() int { - return len(s.Nodes) -} - -func (s nativeNodeSorter) Less(i, j int) bool { - rangeI := s.Nodes[i].Range() - rangeJ := s.Nodes[j].Range() - return rangeI.Start.Byte < rangeJ.Start.Byte -} - -func (s nativeNodeSorter) Swap(i, j int) { - s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i] -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go deleted file mode 100644 index 45669f7f..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go +++ /dev/null @@ -1,260 +0,0 @@ -package hclwrite - -import ( - "fmt" - - "github.com/google/go-cmp/cmp" -) - -// node represents a node in the AST. -type node struct { - content nodeContent - - list *nodes - before, after *node -} - -func newNode(c nodeContent) *node { - return &node{ - content: c, - } -} - -func (n *node) Equal(other *node) bool { - return cmp.Equal(n.content, other.content) -} - -func (n *node) BuildTokens(to Tokens) Tokens { - return n.content.BuildTokens(to) -} - -// Detach removes the receiver from the list it currently belongs to. If the -// node is not currently in a list, this is a no-op. -func (n *node) Detach() { - if n.list == nil { - return - } - if n.before != nil { - n.before.after = n.after - } - if n.after != nil { - n.after.before = n.before - } - if n.list.first == n { - n.list.first = n.after - } - if n.list.last == n { - n.list.last = n.before - } - n.list = nil - n.before = nil - n.after = nil -} - -// ReplaceWith removes the receiver from the list it currently belongs to and -// inserts a new node with the given content in its place. If the node is not -// currently in a list, this function will panic. -// -// The return value is the newly-constructed node, containing the given content. -// After this function returns, the reciever is no longer attached to a list. -func (n *node) ReplaceWith(c nodeContent) *node { - if n.list == nil { - panic("can't replace node that is not in a list") - } - - before := n.before - after := n.after - list := n.list - n.before, n.after, n.list = nil, nil, nil - - nn := newNode(c) - nn.before = before - nn.after = after - nn.list = list - if before != nil { - before.after = nn - } - if after != nil { - after.before = nn - } - return nn -} - -func (n *node) assertUnattached() { - if n.list != nil { - panic(fmt.Sprintf("attempt to attach already-attached node %#v", n)) - } -} - -// nodeContent is the interface type implemented by all AST content types. -type nodeContent interface { - walkChildNodes(w internalWalkFunc) - BuildTokens(to Tokens) Tokens -} - -// nodes is a list of nodes. 
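The TokensForValue helper removed a little further up turns a cty value into canonically formatted tokens, which pairs with Body.SetAttributeRaw when a caller wants to supply an expression as raw tokens. A brief sketch under the same import assumptions:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	body := f.Body()

	// TokensForValue renders the list as HCL tokens; handing them to
	// SetAttributeRaw is equivalent to SetAttributeValue for this input.
	toks := hclwrite.TokensForValue(cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
	}))
	body.SetAttributeRaw("letters", toks)

	fmt.Printf("%s", f.Bytes())
}
```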
-type nodes struct { - first, last *node -} - -func (ns *nodes) BuildTokens(to Tokens) Tokens { - for n := ns.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -func (ns *nodes) Clear() { - ns.first = nil - ns.last = nil -} - -func (ns *nodes) Append(c nodeContent) *node { - n := &node{ - content: c, - } - ns.AppendNode(n) - n.list = ns - return n -} - -func (ns *nodes) AppendNode(n *node) { - if ns.last != nil { - n.before = ns.last - ns.last.after = n - } - n.list = ns - ns.last = n - if ns.first == nil { - ns.first = n - } -} - -func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { - if len(tokens) == 0 { - return nil - } - n := newNode(tokens) - ns.AppendNode(n) - n.list = ns - return n -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns *nodes) FindNodeWithContent(content nodeContent) *node { - for n := ns.first; n != nil; n = n.after { - if n.content == content { - return n - } - } - return nil -} - -// nodeSet is an unordered set of nodes. It is used to describe a set of nodes -// that all belong to the same list that have some role or characteristic -// in common. -type nodeSet map[*node]struct{} - -func newNodeSet() nodeSet { - return make(nodeSet) -} - -func (ns nodeSet) Has(n *node) bool { - if ns == nil { - return false - } - _, exists := ns[n] - return exists -} - -func (ns nodeSet) Add(n *node) { - ns[n] = struct{}{} -} - -func (ns nodeSet) Remove(n *node) { - delete(ns, n) -} - -func (ns nodeSet) List() []*node { - if len(ns) == 0 { - return nil - } - - ret := make([]*node, 0, len(ns)) - - // Determine which list we are working with. We assume here that all of - // the nodes belong to the same list, since that is part of the contract - // for nodeSet. - var list *nodes - for n := range ns { - list = n.list - break - } - - // We recover the order by iterating over the whole list. This is not - // the most efficient way to do it, but our node lists should always be - // small so not worth making things more complex. - for n := list.first; n != nil; n = n.after { - if ns.Has(n) { - ret = append(ret, n) - } - } - return ret -} - -// FindNodeWithContent searches the nodes for a node whose content equals -// the given content. If it finds one then it returns it. Otherwise it returns -// nil. -func (ns nodeSet) FindNodeWithContent(content nodeContent) *node { - for n := range ns { - if n.content == content { - return n - } - } - return nil -} - -type internalWalkFunc func(*node) - -// inTree can be embedded into a content struct that has child nodes to get -// a standard implementation of the NodeContent interface and a record of -// a potential parent node. 
-type inTree struct { - parent *node - children *nodes -} - -func newInTree() inTree { - return inTree{ - children: &nodes{}, - } -} - -func (it *inTree) assertUnattached() { - if it.parent != nil { - panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) - } -} - -func (it *inTree) walkChildNodes(w internalWalkFunc) { - for n := it.children.first; n != nil; n = n.after { - w(n) - } -} - -func (it *inTree) BuildTokens(to Tokens) Tokens { - for n := it.children.first; n != nil; n = n.after { - to = n.BuildTokens(to) - } - return to -} - -// leafNode can be embedded into a content struct to give it a do-nothing -// implementation of walkChildNodes -type leafNode struct { -} - -func (n *leafNode) walkChildNodes(w internalWalkFunc) { -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go deleted file mode 100644 index d6cf532e..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go +++ /dev/null @@ -1,599 +0,0 @@ -package hclwrite - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// Our "parser" here is actually not doing any parsing of its own. Instead, -// it leans on the native parser in hclsyntax, and then uses the source ranges -// from the AST to partition the raw token sequence to match the raw tokens -// up to AST nodes. -// -// This strategy feels somewhat counter-intuitive, since most of the work the -// parser does is thrown away here, but this strategy is chosen because the -// normal parsing work done by hclsyntax is considered to be the "main case", -// while modifying and re-printing source is more of an edge case, used only -// in ancillary tools, and so it's good to keep all the main parsing logic -// with the main case but keep all of the extra complexity of token wrangling -// out of the main parser, which is already rather complex just serving the -// use-cases it already serves. -// -// If the parsing step produces any errors, the returned File is nil because -// we can't reliably extract tokens from the partial AST produced by an -// erroneous parse. -func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - file, diags := hclsyntax.ParseConfig(src, filename, start) - if diags.HasErrors() { - return nil, diags - } - - // To do our work here, we use the "native" tokens (those from hclsyntax) - // to match against source ranges in the AST, but ultimately produce - // slices from our sequence of "writer" tokens, which contain only - // *relative* position information that is more appropriate for - // transformation/writing use-cases. - nativeTokens, diags := hclsyntax.LexConfig(src, filename, start) - if diags.HasErrors() { - // should never happen, since we would've caught these diags in - // the first call above. 
- return nil, diags - } - writerTokens := writerTokens(nativeTokens) - - from := inputTokens{ - nativeTokens: nativeTokens, - writerTokens: writerTokens, - } - - before, root, after := parseBody(file.Body.(*hclsyntax.Body), from) - ret := &File{ - inTree: newInTree(), - - srcBytes: src, - body: root, - } - - nodes := ret.inTree.children - nodes.Append(before.Tokens()) - nodes.AppendNode(root) - nodes.Append(after.Tokens()) - - return ret, diags -} - -type inputTokens struct { - nativeTokens hclsyntax.Tokens - writerTokens Tokens -} - -func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) { - start, end := partitionTokens(it.nativeTokens, rng) - before = it.Slice(0, start) - within = it.Slice(start, end) - after = it.Slice(end, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) { - for i, t := range it.writerTokens { - if t.Type == ty { - return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens)) - } - } - panic(fmt.Sprintf("didn't find any token of type %s", ty)) -} - -func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) { - before, within, after := it.PartitionType(ty) - if within.Len() != 1 { - panic("PartitionType found more than one token") - } - return before, within.Tokens()[0], after -} - -// PartitionIncludeComments is like Partition except the returned "within" -// range includes any lead and line comments associated with the range. -func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) { - start, end := partitionTokens(it.nativeTokens, rng) - start = partitionLeadCommentTokens(it.nativeTokens[:start]) - _, afterNewline := partitionLineEndTokens(it.nativeTokens[end:]) - end += afterNewline - - before = it.Slice(0, start) - within = it.Slice(start, end) - after = it.Slice(end, len(it.nativeTokens)) - return - -} - -// PartitionBlockItem is similar to PartitionIncludeComments but it returns -// the comments as separate token sequences so that they can be captured into -// AST attributes. It makes assumptions that apply only to block items, so -// should not be used for other constructs. -func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) { - before, within, after = it.Partition(rng) - before, leadComments = before.PartitionLeadComments() - lineComments, newline, after = after.PartitionLineEndTokens() - return -} - -func (it inputTokens) PartitionLeadComments() (before, within inputTokens) { - start := partitionLeadCommentTokens(it.nativeTokens) - before = it.Slice(0, start) - within = it.Slice(start, len(it.nativeTokens)) - return -} - -func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) { - afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens) - comments = it.Slice(0, afterComments) - newline = it.Slice(afterComments, afterNewline) - after = it.Slice(afterNewline, len(it.nativeTokens)) - return -} - -func (it inputTokens) Slice(start, end int) inputTokens { - // When we slice, we create a new slice with no additional capacity because - // we expect that these slices will be mutated in order to insert - // new code into the AST, and we want to ensure that a new underlying - // array gets allocated in that case, rather than writing into some - // following slice and corrupting it. 
- return inputTokens{ - nativeTokens: it.nativeTokens[start:end:end], - writerTokens: it.writerTokens[start:end:end], - } -} - -func (it inputTokens) Len() int { - return len(it.nativeTokens) -} - -func (it inputTokens) Tokens() Tokens { - return it.writerTokens -} - -func (it inputTokens) Types() []hclsyntax.TokenType { - ret := make([]hclsyntax.TokenType, len(it.nativeTokens)) - for i, tok := range it.nativeTokens { - ret[i] = tok.Type - } - return ret -} - -// parseBody locates the given body within the given input tokens and returns -// the resulting *Body object as well as the tokens that appeared before and -// after it. -func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) { - before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange) - - // The main AST doesn't retain the original source ordering of the - // body items, so we need to reconstruct that ordering by inspecting - // their source ranges. - nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) - for _, nativeAttr := range nativeBody.Attributes { - nativeItems = append(nativeItems, nativeAttr) - } - for _, nativeBlock := range nativeBody.Blocks { - nativeItems = append(nativeItems, nativeBlock) - } - sort.Sort(nativeNodeSorter{nativeItems}) - - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - - remain := within - for _, nativeItem := range nativeItems { - beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) - - if beforeItem.Len() > 0 { - body.AppendUnstructuredTokens(beforeItem.Tokens()) - } - body.appendItemNode(item) - - remain = afterItem - } - - if remain.Len() > 0 { - body.AppendUnstructuredTokens(remain.Tokens()) - } - - return before, newNode(body), after -} - -func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { - before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) - - var item *node - - switch tItem := nativeItem.(type) { - case *hclsyntax.Attribute: - item = parseAttribute(tItem, within, leadComments, lineComments, newline) - case *hclsyntax.Block: - item = parseBlock(tItem, within, leadComments, lineComments, newline) - default: - // should never happen if caller is behaving - panic("unsupported native item type") - } - - return before, item, after -} - -func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { - attr := &Attribute{ - inTree: newInTree(), - } - children := attr.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - attr.leadComments = cn - children.AppendNode(cn) - } - - before, nameTokens, from := from.Partition(nativeAttr.NameRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if nameTokens.Len() != 1 { - // Should never happen with valid input - panic("attribute name is not exactly one token") - } - token := nameTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - attr.name = in - children.AppendNode(in) - } - - before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(equalsTokens.Tokens()) - - before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) - { - children.AppendUnstructuredTokens(before.Tokens()) - exprNode := parseExpression(nativeAttr.Expr, exprTokens) - attr.expr = exprNode - children.AppendNode(exprNode) - } - - { - cn := 
newNode(newComments(lineComments.Tokens())) - attr.lineComments = cn - children.AppendNode(cn) - } - - children.AppendUnstructuredTokens(newline.Tokens()) - - // Collect any stragglers, though there shouldn't be any - children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(attr) -} - -func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { - block := &Block{ - inTree: newInTree(), - labels: newNodeSet(), - } - children := block.inTree.children - - { - cn := newNode(newComments(leadComments.Tokens())) - block.leadComments = cn - children.AppendNode(cn) - } - - before, typeTokens, from := from.Partition(nativeBlock.TypeRange) - { - children.AppendUnstructuredTokens(before.Tokens()) - if typeTokens.Len() != 1 { - // Should never happen with valid input - panic("block type name is not exactly one token") - } - token := typeTokens.Tokens()[0] - in := newNode(newIdentifier(token)) - block.typeName = in - children.AppendNode(in) - } - - for _, rng := range nativeBlock.LabelRanges { - var labelTokens inputTokens - before, labelTokens, from = from.Partition(rng) - children.AppendUnstructuredTokens(before.Tokens()) - tokens := labelTokens.Tokens() - var ln *node - if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { - ln = newNode(newIdentifier(tokens[0])) - } else { - ln = newNode(newQuoted(tokens)) - } - block.labels.Add(ln) - children.AppendNode(ln) - } - - before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(oBrace.Tokens()) - - // We go a bit out of order here: we go hunting for the closing brace - // so that we have a delimited body, but then we'll deal with the body - // before we actually append the closing brace and any straggling tokens - // that appear after it. - bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) - before, body, after := parseBody(nativeBlock.Body, bodyTokens) - children.AppendUnstructuredTokens(before.Tokens()) - block.body = body - children.AppendNode(body) - children.AppendUnstructuredTokens(after.Tokens()) - - children.AppendUnstructuredTokens(cBrace.Tokens()) - - // stragglers - children.AppendUnstructuredTokens(from.Tokens()) - if lineComments.Len() > 0 { - // blocks don't actually have line comments, so we'll just treat - // them as extra stragglers - children.AppendUnstructuredTokens(lineComments.Tokens()) - } - children.AppendUnstructuredTokens(newline.Tokens()) - - return newNode(block) -} - -func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { - expr := newExpression() - children := expr.inTree.children - - nativeVars := nativeExpr.Variables() - - for _, nativeTraversal := range nativeVars { - before, traversal, after := parseTraversal(nativeTraversal, from) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(traversal) - expr.absTraversals.Add(traversal) - from = after - } - // Attach any stragglers that don't belong to a traversal to the expression - // itself. In an expression with no traversals at all, this is just the - // entirety of "from". 
- children.AppendUnstructuredTokens(from.Tokens()) - - return newNode(expr) -} - -func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { - traversal := newTraversal() - children := traversal.inTree.children - before, from, after = from.Partition(nativeTraversal.SourceRange()) - - stepAfter := from - for _, nativeStep := range nativeTraversal { - before, step, after := parseTraversalStep(nativeStep, stepAfter) - children.AppendUnstructuredTokens(before.Tokens()) - children.AppendNode(step) - traversal.steps.Add(step) - stepAfter = after - } - - return before, newNode(traversal), after -} - -func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { - var children *nodes - switch tNativeStep := nativeStep.(type) { - - case hcl.TraverseRoot, hcl.TraverseAttr: - step := newTraverseName() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) - name := newIdentifier(token) - children.AppendUnstructuredTokens(inBefore.Tokens()) - step.name = children.Append(name) - children.AppendUnstructuredTokens(inAfter.Tokens()) - return before, newNode(step), after - - case hcl.TraverseIndex: - step := newTraverseIndex() - children = step.inTree.children - before, from, after = from.Partition(nativeStep.SourceRange()) - - var inBefore, oBrack, keyTokens, cBrack inputTokens - inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) - children.AppendUnstructuredTokens(inBefore.Tokens()) - children.AppendUnstructuredTokens(oBrack.Tokens()) - keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) - - keyVal := tNativeStep.Key - switch keyVal.Type() { - case cty.String: - key := newQuoted(keyTokens.Tokens()) - step.key = children.Append(key) - case cty.Number: - valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) - children.AppendUnstructuredTokens(valBefore.Tokens()) - key := newNumber(valToken) - step.key = children.Append(key) - children.AppendUnstructuredTokens(valAfter.Tokens()) - } - - children.AppendUnstructuredTokens(cBrack.Tokens()) - children.AppendUnstructuredTokens(from.Tokens()) - - return before, newNode(step), after - default: - panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) - } - -} - -// writerTokens takes a sequence of tokens as produced by the main hclsyntax -// package and transforms it into an equivalent sequence of tokens using -// this package's own token model. -// -// The resulting list contains the same number of tokens and uses the same -// indices as the input, allowing the two sets of tokens to be correlated -// by index. -func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { - // Ultimately we want a slice of token _pointers_, but since we can - // predict how much memory we're going to devote to tokens we'll allocate - // it all as a single flat buffer and thus give the GC less work to do. - tokBuf := make([]Token, len(nativeTokens)) - var lastByteOffset int - for i, mainToken := range nativeTokens { - // Create a copy of the bytes so that we can mutate without - // corrupting the original token stream. 
- bytes := make([]byte, len(mainToken.Bytes)) - copy(bytes, mainToken.Bytes) - - tokBuf[i] = Token{ - Type: mainToken.Type, - Bytes: bytes, - - // We assume here that spaces are always ASCII spaces, since - // that's what the scanner also assumes, and thus the number - // of bytes skipped is also the number of space characters. - SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, - } - - lastByteOffset = mainToken.Range.End.Byte - } - - // Now make a slice of pointers into the previous slice. - ret := make(Tokens, len(tokBuf)) - for i := range ret { - ret[i] = &tokBuf[i] - } - - return ret -} - -// partitionTokens takes a sequence of tokens and a hcl.Range and returns -// two indices within the token sequence that correspond with the range -// boundaries, such that the slice operator could be used to produce -// three token sequences for before, within, and after respectively: -// -// start, end := partitionTokens(toks, rng) -// before := toks[:start] -// within := toks[start:end] -// after := toks[end:] -// -// This works best when the range is aligned with token boundaries (e.g. -// because it was produced in terms of the scanner's result) but if that isn't -// true then it will make a best effort that may produce strange results at -// the boundaries. -// -// Native hclsyntax tokens are used here, because they contain the necessary -// absolute position information. However, since writerTokens produces a -// correlatable sequence of writer tokens, the resulting indices can be -// used also to index into its result, allowing the partitioning of writer -// tokens to be driven by the partitioning of native tokens. -// -// The tokens are assumed to be in source order and non-overlapping, which -// will be true if the token sequence from the scanner is used directly. -func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { - // We us a linear search here because we assume tha in most cases our - // target range is close to the beginning of the sequence, and the seqences - // are generally small for most reasonable files anyway. - for i := 0; ; i++ { - if i >= len(toks) { - // No tokens for the given range at all! - return len(toks), len(toks) - } - - if toks[i].Range.Start.Byte >= rng.Start.Byte { - start = i - break - } - } - - for i := start; ; i++ { - if i >= len(toks) { - // The range "hangs off" the end of the token sequence - return start, len(toks) - } - - if toks[i].Range.Start.Byte >= rng.End.Byte { - end = i // end marker is exclusive - break - } - } - - return start, end -} - -// partitionLeadCommentTokens takes a sequence of tokens that is assumed -// to immediately precede a construct that can have lead comment tokens, -// and returns the index into that sequence where the lead comments begin. -// -// Lead comments are defined as whole lines containing only comment tokens -// with no blank lines between. If no such lines are found, the returned -// index will be len(toks). -func partitionLeadCommentTokens(toks hclsyntax.Tokens) int { - // single-line comments (which is what we're interested in here) - // consume their trailing newline, so we can just walk backwards - // until we stop seeing comment tokens. 
- for i := len(toks) - 1; i >= 0; i-- { - if toks[i].Type != hclsyntax.TokenComment { - return i + 1 - } - } - return 0 -} - -// partitionLineEndTokens takes a sequence of tokens that is assumed -// to immediately follow a construct that can have a line comment, and -// returns first the index where any line comments end and then second -// the index immediately after the trailing newline. -// -// Line comments are defined as comments that appear immediately after -// a construct on the same line where its significant tokens ended. -// -// Since single-line comment tokens (# and //) include the newline that -// terminates them, in the presence of these the two returned indices -// will be the same since the comment itself serves as the line end. -func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) { - for i := 0; i < len(toks); i++ { - tok := toks[i] - if tok.Type != hclsyntax.TokenComment { - switch tok.Type { - case hclsyntax.TokenNewline: - return i, i + 1 - case hclsyntax.TokenEOF: - // Although this is valid, we mustn't include the EOF - // itself as our "newline" or else strange things will - // happen when we try to append new items. - return i, i - default: - // If we have well-formed input here then nothing else should be - // possible. This path should never happen, because we only try - // to extract tokens from the sequence if the parser succeeded, - // and it should catch this problem itself. - panic("malformed line trailers: expected only comments and newlines") - } - } - - if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { - // Newline at the end of a single-line comment serves both as - // the end of comments *and* the end of the line. - return i + 1, i + 1 - } - } - return len(toks), len(toks) -} - -// lexConfig uses the hclsyntax scanner to get a token stream and then -// rewrites it into this package's token model. -// -// Any errors produced during scanning are ignored, so the results of this -// function should be used with care. -func lexConfig(src []byte) Tokens { - mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1}) - return writerTokens(mainTokens) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go deleted file mode 100644 index 678a3aa4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go +++ /dev/null @@ -1,44 +0,0 @@ -package hclwrite - -import ( - "bytes" - - "github.com/hashicorp/hcl/v2" -) - -// NewFile creates a new file object that is empty and ready to have constructs -// added t it. -func NewFile() *File { - body := &Body{ - inTree: newInTree(), - items: newNodeSet(), - } - file := &File{ - inTree: newInTree(), - } - file.body = file.inTree.children.Append(body) - return file -} - -// ParseConfig interprets the given source bytes into a *hclwrite.File. The -// resulting AST can be used to perform surgical edits on the source code -// before turning it back into bytes again. -func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { - return parse(src, filename, start) -} - -// Format takes source code and performs simple whitespace changes to transform -// it to a canonical layout style. -// -// Format skips constructing an AST and works directly with tokens, so it -// is less expensive than formatting via the AST for situations where no other -// changes will be made. 
It also ignores syntax errors and can thus be applied -// to partial source code, although the result in that case may not be -// desirable. -func Format(src []byte) []byte { - tokens := lexConfig(src) - format(tokens) - buf := &bytes.Buffer{} - tokens.WriteTo(buf) - return buf.Bytes() -} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go deleted file mode 100644 index 3bd3e5f4..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go +++ /dev/null @@ -1,122 +0,0 @@ -package hclwrite - -import ( - "bytes" - "io" - - "github.com/apparentlymart/go-textseg/textseg" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// Token is a single sequence of bytes annotated with a type. It is similar -// in purpose to hclsyntax.Token, but discards the source position information -// since that is not useful in code generation. -type Token struct { - Type hclsyntax.TokenType - Bytes []byte - - // We record the number of spaces before each token so that we can - // reproduce the exact layout of the original file when we're making - // surgical changes in-place. When _new_ code is created it will always - // be in the canonical style, but we preserve layout of existing code. - SpacesBefore int -} - -// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. -// A complete token is not possible since we don't have source location -// information here, and so this method is unexported so we can be sure it will -// only be used for internal purposes where we know the range isn't important. -// -// This is primarily intended to allow us to re-use certain functionality from -// hclsyntax rather than re-implementing it against our own token type here. -func (t *Token) asHCLSyntax() hclsyntax.Token { - return hclsyntax.Token{ - Type: t.Type, - Bytes: t.Bytes, - Range: hcl.Range{ - Filename: "", - }, - } -} - -// Tokens is a flat list of tokens. -type Tokens []*Token - -func (ts Tokens) Bytes() []byte { - buf := &bytes.Buffer{} - ts.WriteTo(buf) - return buf.Bytes() -} - -func (ts Tokens) testValue() string { - return string(ts.Bytes()) -} - -// Columns returns the number of columns (grapheme clusters) the token sequence -// occupies. The result is not meaningful if there are newline or single-line -// comment tokens in the sequence. -func (ts Tokens) Columns() int { - ret := 0 - for _, token := range ts { - ret += token.SpacesBefore // spaces are always worth one column each - ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) - ret += ct - } - return ret -} - -// WriteTo takes an io.Writer and writes the bytes for each token to it, -// along with the spacing that separates each token. In other words, this -// allows serializing the tokens to a file or other such byte stream. -func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { - // We know we're going to be writing a lot of small chunks of repeated - // space characters, so we'll prepare a buffer of these that we can - // easily pass to wr.Write without any further allocation. 
- spaces := make([]byte, 40) - for i := range spaces { - spaces[i] = ' ' - } - - var n int64 - var err error - for _, token := range ts { - if err != nil { - return n, err - } - - for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) { - thisChunk := spacesBefore - if thisChunk > len(spaces) { - thisChunk = len(spaces) - } - var thisN int - thisN, err = wr.Write(spaces[:thisChunk]) - n += int64(thisN) - if err != nil { - return n, err - } - } - - var thisN int - thisN, err = wr.Write(token.Bytes) - n += int64(thisN) - } - - return n, err -} - -func (ts Tokens) walkChildNodes(w internalWalkFunc) { - // Unstructured tokens have no child nodes -} - -func (ts Tokens) BuildTokens(to Tokens) Tokens { - return append(to, ts...) -} - -func newIdentToken(name string) *Token { - return &Token{ - Type: hclsyntax.TokenIdent, - Bytes: []byte(name), - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/ast.go b/vendor/github.com/hashicorp/hcl/v2/json/ast.go deleted file mode 100644 index 9c580ca3..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/ast.go +++ /dev/null @@ -1,121 +0,0 @@ -package json - -import ( - "math/big" - - "github.com/hashicorp/hcl/v2" -) - -type node interface { - Range() hcl.Range - StartRange() hcl.Range -} - -type objectVal struct { - Attrs []*objectAttr - SrcRange hcl.Range // range of the entire object, brace-to-brace - OpenRange hcl.Range // range of the opening brace - CloseRange hcl.Range // range of the closing brace -} - -func (n *objectVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *objectVal) StartRange() hcl.Range { - return n.OpenRange -} - -type objectAttr struct { - Name string - Value node - NameRange hcl.Range // range of the name string -} - -func (n *objectAttr) Range() hcl.Range { - return n.NameRange -} - -func (n *objectAttr) StartRange() hcl.Range { - return n.NameRange -} - -type arrayVal struct { - Values []node - SrcRange hcl.Range // range of the entire object, bracket-to-bracket - OpenRange hcl.Range // range of the opening bracket -} - -func (n *arrayVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *arrayVal) StartRange() hcl.Range { - return n.OpenRange -} - -type booleanVal struct { - Value bool - SrcRange hcl.Range -} - -func (n *booleanVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *booleanVal) StartRange() hcl.Range { - return n.SrcRange -} - -type numberVal struct { - Value *big.Float - SrcRange hcl.Range -} - -func (n *numberVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *numberVal) StartRange() hcl.Range { - return n.SrcRange -} - -type stringVal struct { - Value string - SrcRange hcl.Range -} - -func (n *stringVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *stringVal) StartRange() hcl.Range { - return n.SrcRange -} - -type nullVal struct { - SrcRange hcl.Range -} - -func (n *nullVal) Range() hcl.Range { - return n.SrcRange -} - -func (n *nullVal) StartRange() hcl.Range { - return n.SrcRange -} - -// invalidVal is used as a placeholder where a value is needed for a valid -// parse tree but the input was invalid enough to prevent one from being -// created. 
-type invalidVal struct { - SrcRange hcl.Range -} - -func (n invalidVal) Range() hcl.Range { - return n.SrcRange -} - -func (n invalidVal) StartRange() hcl.Range { - return n.SrcRange -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go deleted file mode 100644 index fbdd8bff..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go +++ /dev/null @@ -1,33 +0,0 @@ -package json - -import ( - "github.com/agext/levenshtein" -) - -var keywords = []string{"false", "true", "null"} - -// keywordSuggestion tries to find a valid JSON keyword that is close to the -// given string and returns it if found. If no keyword is close enough, returns -// the empty string. -func keywordSuggestion(given string) string { - return nameSuggestion(given, keywords) -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/doc.go b/vendor/github.com/hashicorp/hcl/v2/json/doc.go deleted file mode 100644 index 84d73193..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package json is the JSON parser for HCL. It parses JSON files and returns -// implementations of the core HCL structural interfaces in terms of the -// JSON data inside. -// -// This is not a generic JSON parser. Instead, it deals with the mapping from -// the JSON information model to the HCL information model, using a number -// of hard-coded structural conventions. -// -// In most cases applications will not import this package directly, but will -// instead access its functionality indirectly through functions in the main -// "hcl" package and in the "hclparse" package. -package json diff --git a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go b/vendor/github.com/hashicorp/hcl/v2/json/navigation.go deleted file mode 100644 index bc8a97f7..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go +++ /dev/null @@ -1,70 +0,0 @@ -package json - -import ( - "fmt" - "strings" -) - -type navigation struct { - root node -} - -// Implementation of hcled.ContextString -func (n navigation) ContextString(offset int) string { - steps := navigationStepsRev(n.root, offset) - if steps == nil { - return "" - } - - // We built our slice backwards, so we'll reverse it in-place now. - half := len(steps) / 2 // integer division - for i := 0; i < half; i++ { - steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i] - } - - ret := strings.Join(steps, "") - if len(ret) > 0 && ret[0] == '.' { - ret = ret[1:] - } - return ret -} - -func navigationStepsRev(v node, offset int) []string { - switch tv := v.(type) { - case *objectVal: - // Do any of our properties have an object that contains the target - // offset? 
- for _, attr := range tv.Attrs { - k := attr.Name - av := attr.Value - - switch av.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if av.Range().ContainsOffset(offset) { - return append(navigationStepsRev(av, offset), "."+k) - } - } - case *arrayVal: - // Do any of our elements contain the target offset? - for i, elem := range tv.Values { - - switch elem.(type) { - case *objectVal, *arrayVal: - // okay - default: - continue - } - - if elem.Range().ContainsOffset(offset) { - return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/parser.go b/vendor/github.com/hashicorp/hcl/v2/json/parser.go deleted file mode 100644 index 7a54c51b..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/parser.go +++ /dev/null @@ -1,496 +0,0 @@ -package json - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{ - Filename: filename, - Pos: hcl.Pos{ - Byte: 0, - Line: 1, - Column: 1, - }, - }) - p := newPeeker(tokens) - node, diags := parseValue(p) - if len(diags) == 0 && p.Peek().Type != tokenEOF { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous data after value", - Detail: "Extra characters appear after the JSON value.", - Subject: p.Peek().Range.Ptr(), - }) - } - return node, diags -} - -func parseValue(p *peeker) (node, hcl.Diagnostics) { - tok := p.Peek() - - wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { - if n != nil { - return n, diags - } - return invalidVal{tok.Range}, diags - } - - switch tok.Type { - case tokenBraceO: - return wrapInvalid(parseObject(p)) - case tokenBrackO: - return wrapInvalid(parseArray(p)) - case tokenNumber: - return wrapInvalid(parseNumber(p)) - case tokenString: - return wrapInvalid(parseString(p)) - case tokenKeyword: - return wrapInvalid(parseKeyword(p)) - case tokenBraceC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing JSON value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenBrackC: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing array element value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - case tokenEOF: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Missing value", - Detail: "The JSON data ends prematurely.", - Subject: &tok.Range, - }, - }) - default: - return wrapInvalid(nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid start of value", - Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", - Subject: &tok.Range, - }, - }) - } -} - -func tokenCanStartValue(tok token) bool { - switch tok.Type { - case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: - return true - default: - return false - } -} - -func parseObject(p *peeker) (node, hcl.Diagnostics) { - var diags hcl.Diagnostics - - open := p.Read() - attrs := []*objectAttr{} - - // recover is used to shift the peeker to what seems to be the end of - // our object, so that when we encounter an error we leave the peeker - // at a reasonable point in the token stream to 
continue parsing. - recover := func(tok token) { - open := 1 - for { - switch tok.Type { - case tokenBraceO: - open++ - case tokenBraceC: - open-- - if open <= 1 { - return - } - case tokenEOF: - // Ran out of source before we were able to recover, - // so we'll bail here and let the caller deal with it. - return - } - tok = p.Read() - } - } - -Token: - for { - if p.Peek().Type == tokenBraceC { - break Token - } - - keyNode, keyDiags := parseValue(p) - diags = diags.Extend(keyDiags) - if keyNode == nil { - return nil, diags - } - - keyStrNode, ok := keyNode.(*stringVal) - if !ok { - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object property name", - Detail: "A JSON object property name must be a string", - Subject: keyNode.StartRange().Ptr(), - }) - } - - key := keyStrNode.Value - - colon := p.Read() - if colon.Type != tokenColon { - recover(colon) - - if colon.Type == tokenBraceC || colon.Type == tokenComma { - // Catch common mistake of using braces instead of brackets - // for an object. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing object value", - Detail: "A JSON object attribute must have a value, introduced by a colon.", - Subject: &colon.Range, - }) - } - - if colon.Type == tokenEquals { - // Possible confusion with native HCL syntax. - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", - Subject: &colon.Range, - }) - } - - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing property value colon", - Detail: "A colon must appear between an object property's name and its value.", - Subject: &colon.Range, - }) - } - - valNode, valDiags := parseValue(p) - diags = diags.Extend(valDiags) - if valNode == nil { - return nil, diags - } - - attrs = append(attrs, &objectAttr{ - Name: key, - Value: valNode, - NameRange: keyStrNode.SrcRange, - }) - - switch p.Peek().Type { - case tokenComma: - comma := p.Read() - if p.Peek().Type == tokenBraceC { - // Special error message for this common mistake - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Trailing comma in object", - Detail: "JSON does not permit a trailing comma after the final property in an object.", - Subject: &comma.Range, - }) - } - continue Token - case tokenEOF: - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unclosed object", - Detail: "No closing brace was found for this JSON object.", - Subject: &open.Range, - }) - case tokenBrackC: - // Consume the bracket anyway, so that we don't return with the peeker - // at a strange place. 
- p.Read() - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Mismatched braces", - Detail: "A JSON object must be closed with a brace, not a bracket.", - Subject: p.Peek().Range.Ptr(), - }) - case tokenBraceC: - break Token - default: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing attribute seperator comma", - Detail: "A comma must appear between each property definition in an object.", - Subject: p.Peek().Range.Ptr(), - }) - } - - } - - close := p.Read() - return &objectVal{ - Attrs: attrs, - SrcRange: hcl.RangeBetween(open.Range, close.Range), - OpenRange: open.Range, - CloseRange: close.Range, - }, diags -} - -func parseArray(p *peeker) (node, hcl.Diagnostics) { - var diags hcl.Diagnostics - - open := p.Read() - vals := []node{} - - // recover is used to shift the peeker to what seems to be the end of - // our array, so that when we encounter an error we leave the peeker - // at a reasonable point in the token stream to continue parsing. - recover := func(tok token) { - open := 1 - for { - switch tok.Type { - case tokenBrackO: - open++ - case tokenBrackC: - open-- - if open <= 1 { - return - } - case tokenEOF: - // Ran out of source before we were able to recover, - // so we'll bail here and let the caller deal with it. - return - } - tok = p.Read() - } - } - -Token: - for { - if p.Peek().Type == tokenBrackC { - break Token - } - - valNode, valDiags := parseValue(p) - diags = diags.Extend(valDiags) - if valNode == nil { - return nil, diags - } - - vals = append(vals, valNode) - - switch p.Peek().Type { - case tokenComma: - comma := p.Read() - if p.Peek().Type == tokenBrackC { - // Special error message for this common mistake - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Trailing comma in array", - Detail: "JSON does not permit a trailing comma after the final value in an array.", - Subject: &comma.Range, - }) - } - continue Token - case tokenColon: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid array value", - Detail: "A colon is not used to introduce values in a JSON array.", - Subject: p.Peek().Range.Ptr(), - }) - case tokenEOF: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unclosed object", - Detail: "No closing bracket was found for this JSON array.", - Subject: &open.Range, - }) - case tokenBraceC: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Mismatched brackets", - Detail: "A JSON array must be closed with a bracket, not a brace.", - Subject: p.Peek().Range.Ptr(), - }) - case tokenBrackC: - break Token - default: - recover(p.Read()) - return nil, diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing attribute seperator comma", - Detail: "A comma must appear between each value in an array.", - Subject: p.Peek().Range.Ptr(), - }) - } - - } - - close := p.Read() - return &arrayVal{ - Values: vals, - SrcRange: hcl.RangeBetween(open.Range, close.Range), - OpenRange: open.Range, - }, diags -} - -func parseNumber(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - - // Use encoding/json to validate the number syntax. - // TODO: Do this more directly to produce better diagnostics. 
- var num json.Number - err := json.Unmarshal(tok.Bytes, &num) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON number", - Detail: fmt.Sprintf("There is a syntax error in the given JSON number."), - Subject: &tok.Range, - }, - } - } - - // We want to guarantee that we parse numbers the same way as cty (and thus - // native syntax HCL) would here, so we'll use the cty parser even though - // in most other cases we don't actually introduce cty concepts until - // decoding time. We'll unwrap the parsed float immediately afterwards, so - // the cty value is just a temporary helper. - nv, err := cty.ParseNumberVal(string(num)) - if err != nil { - // Should never happen if above passed, since JSON numbers are a subset - // of what cty can parse... - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON number", - Detail: fmt.Sprintf("There is a syntax error in the given JSON number."), - Subject: &tok.Range, - }, - } - } - - return &numberVal{ - Value: nv.AsBigFloat(), - SrcRange: tok.Range, - }, nil -} - -func parseString(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - var str string - err := json.Unmarshal(tok.Bytes, &str) - - if err != nil { - var errRange hcl.Range - if serr, ok := err.(*json.SyntaxError); ok { - errOfs := serr.Offset - errPos := tok.Range.Start - errPos.Byte += int(errOfs) - - // TODO: Use the byte offset to properly count unicode - // characters for the column, and mark the whole of the - // character that was wrong as part of our range. - errPos.Column += int(errOfs) - - errEndPos := errPos - errEndPos.Byte++ - errEndPos.Column++ - - errRange = hcl.Range{ - Filename: tok.Range.Filename, - Start: errPos, - End: errEndPos, - } - } else { - errRange = tok.Range - } - - var contextRange *hcl.Range - if errRange != tok.Range { - contextRange = &tok.Range - } - - // FIXME: Eventually we should parse strings directly here so - // we can produce a more useful error message in the face fo things - // such as invalid escapes, etc. 
- return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON string", - Detail: fmt.Sprintf("There is a syntax error in the given JSON string."), - Subject: &errRange, - Context: contextRange, - }, - } - } - - return &stringVal{ - Value: str, - SrcRange: tok.Range, - }, nil -} - -func parseKeyword(p *peeker) (node, hcl.Diagnostics) { - tok := p.Read() - s := string(tok.Bytes) - - switch s { - case "true": - return &booleanVal{ - Value: true, - SrcRange: tok.Range, - }, nil - case "false": - return &booleanVal{ - Value: false, - SrcRange: tok.Range, - }, nil - case "null": - return &nullVal{ - SrcRange: tok.Range, - }, nil - case "undefined", "NaN", "Infinity": - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON keyword", - Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s), - Subject: &tok.Range, - }, - } - default: - var dym string - if suggest := keywordSuggestion(s); suggest != "" { - dym = fmt.Sprintf(" Did you mean %q?", suggest) - } - - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Invalid JSON keyword", - Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym), - Subject: &tok.Range, - }, - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go b/vendor/github.com/hashicorp/hcl/v2/json/peeker.go deleted file mode 100644 index fc7bbf58..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go +++ /dev/null @@ -1,25 +0,0 @@ -package json - -type peeker struct { - tokens []token - pos int -} - -func newPeeker(tokens []token) *peeker { - return &peeker{ - tokens: tokens, - pos: 0, - } -} - -func (p *peeker) Peek() token { - return p.tokens[p.pos] -} - -func (p *peeker) Read() token { - ret := p.tokens[p.pos] - if ret.Type != tokenEOF { - p.pos++ - } - return ret -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/public.go b/vendor/github.com/hashicorp/hcl/v2/json/public.go deleted file mode 100644 index 8dc4a36a..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/public.go +++ /dev/null @@ -1,94 +0,0 @@ -package json - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/hcl/v2" -) - -// Parse attempts to parse the given buffer as JSON and, if successful, returns -// a hcl.File for the HCL configuration represented by it. -// -// This is not a generic JSON parser. Instead, it deals only with the profile -// of JSON used to express HCL configuration. -// -// The returned file is valid only if the returned diagnostics returns false -// from its HasErrors method. If HasErrors returns true, the file represents -// the subset of data that was able to be parsed, which may be none. -func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - rootNode, diags := parseFileContent(src, filename) - - switch rootNode.(type) { - case *objectVal, *arrayVal: - // okay - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Root value must be object", - Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.", - Subject: rootNode.StartRange().Ptr(), - }) - - // Since we've already produced an error message for this being - // invalid, we'll return an empty placeholder here so that trying to - // extract content from our root body won't produce a redundant - // error saying the same thing again in more general terms. 
- fakePos := hcl.Pos{ - Byte: 0, - Line: 1, - Column: 1, - } - fakeRange := hcl.Range{ - Filename: filename, - Start: fakePos, - End: fakePos, - } - rootNode = &objectVal{ - Attrs: []*objectAttr{}, - SrcRange: fakeRange, - OpenRange: fakeRange, - } - } - - file := &hcl.File{ - Body: &body{ - val: rootNode, - }, - Bytes: src, - Nav: navigation{rootNode}, - } - return file, diags -} - -// ParseFile is a convenience wrapper around Parse that first attempts to load -// data from the given filename, passing the result to Parse if successful. -// -// If the file cannot be read, an error diagnostic with nil context is returned. -func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) { - f, err := os.Open(filename) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to open file", - Detail: fmt.Sprintf("The file %q could not be opened.", filename), - }, - } - } - defer f.Close() - - src, err := ioutil.ReadAll(f) - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The file %q was opened, but an error occured while reading it.", filename), - }, - } - } - - return Parse(src, filename) -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go b/vendor/github.com/hashicorp/hcl/v2/json/scanner.go deleted file mode 100644 index 912eabce..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go +++ /dev/null @@ -1,297 +0,0 @@ -package json - -import ( - "fmt" - - "github.com/apparentlymart/go-textseg/textseg" - "github.com/hashicorp/hcl/v2" -) - -//go:generate stringer -type tokenType scanner.go -type tokenType rune - -const ( - tokenBraceO tokenType = '{' - tokenBraceC tokenType = '}' - tokenBrackO tokenType = '[' - tokenBrackC tokenType = ']' - tokenComma tokenType = ',' - tokenColon tokenType = ':' - tokenKeyword tokenType = 'K' - tokenString tokenType = 'S' - tokenNumber tokenType = 'N' - tokenEOF tokenType = '␄' - tokenInvalid tokenType = 0 - tokenEquals tokenType = '=' // used only for reminding the user of JSON syntax -) - -type token struct { - Type tokenType - Bytes []byte - Range hcl.Range -} - -// scan returns the primary tokens for the given JSON buffer in sequence. -// -// The responsibility of this pass is to just mark the slices of the buffer -// as being of various types. It is lax in how it interprets the multi-byte -// token types keyword, string and number, preferring to capture erroneous -// extra bytes that we presume the user intended to be part of the token -// so that we can generate more helpful diagnostics in the parser. 
-func scan(buf []byte, start pos) []token { - var tokens []token - p := start - for { - if len(buf) == 0 { - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - - buf, p = skipWhitespace(buf, p) - - if len(buf) == 0 { - tokens = append(tokens, token{ - Type: tokenEOF, - Bytes: nil, - Range: posRange(p, p), - }) - return tokens - } - - start = p - - first := buf[0] - switch { - case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=': - p.Pos.Column++ - p.Pos.Byte++ - tokens = append(tokens, token{ - Type: tokenType(first), - Bytes: buf[0:1], - Range: posRange(start, p), - }) - buf = buf[1:] - case first == '"': - var tokBuf []byte - tokBuf, buf, p = scanString(buf, p) - tokens = append(tokens, token{ - Type: tokenString, - Bytes: tokBuf, - Range: posRange(start, p), - }) - case byteCanStartNumber(first): - var tokBuf []byte - tokBuf, buf, p = scanNumber(buf, p) - tokens = append(tokens, token{ - Type: tokenNumber, - Bytes: tokBuf, - Range: posRange(start, p), - }) - case byteCanStartKeyword(first): - var tokBuf []byte - tokBuf, buf, p = scanKeyword(buf, p) - tokens = append(tokens, token{ - Type: tokenKeyword, - Bytes: tokBuf, - Range: posRange(start, p), - }) - default: - tokens = append(tokens, token{ - Type: tokenInvalid, - Bytes: buf[:1], - Range: start.Range(1, 1), - }) - // If we've encountered an invalid then we might as well stop - // scanning since the parser won't proceed beyond this point. - return tokens - } - } -} - -func byteCanStartNumber(b byte) bool { - switch b { - // We are slightly more tolerant than JSON requires here since we - // expect the parser will make a stricter interpretation of the - // number bytes, but we specifically don't allow 'e' or 'E' here - // since we want the scanner to treat that as the start of an - // invalid keyword instead, to produce more intelligible error messages. - case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return true - default: - return false - } -} - -func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't check that the sequence of digit-ish bytes is - // in a valid order. The parser must do this when decoding a number - // token. - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func byteCanStartKeyword(b byte) bool { - switch { - // We allow any sequence of alphabetical characters here, even though - // JSON is more constrained, so that we can collect what we presume - // the user intended to be a single keyword and then check its validity - // in the parser, where we can generate better diagnostics. - // So e.g. we want to be able to say: - // unrecognized keyword "True". Did you mean "true"? - case isAlphabetical(b): - return true - default: - return false - } -} - -func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - b := buf[i] - switch { - case isAlphabetical(b) || b == '_': - p.Pos.Byte++ - p.Pos.Column++ - default: - break Byte - } - } - return buf[:i], buf[i:], p -} - -func scanString(buf []byte, start pos) ([]byte, []byte, pos) { - // The scanner doesn't validate correct use of escapes, etc. 
It pays - // attention to escapes only for the purpose of identifying the closing - // quote character. It's the parser's responsibility to do proper - // validation. - // - // The scanner also doesn't specifically detect unterminated string - // literals, though they can be identified in the parser by checking if - // the final byte in a string token is the double-quote character. - - // Skip the opening quote symbol - i := 1 - p := start - p.Pos.Byte++ - p.Pos.Column++ - escaping := false -Byte: - for i < len(buf) { - b := buf[i] - - switch { - case b == '\\': - escaping = !escaping - p.Pos.Byte++ - p.Pos.Column++ - i++ - case b == '"': - p.Pos.Byte++ - p.Pos.Column++ - i++ - if !escaping { - break Byte - } - escaping = false - case b < 32: - break Byte - default: - // Advance by one grapheme cluster, so that we consider each - // grapheme to be a "column". - // Ignoring error because this scanner cannot produce errors. - advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) - - p.Pos.Byte += advance - p.Pos.Column++ - i += advance - - escaping = false - } - } - return buf[:i], buf[i:], p -} - -func skipWhitespace(buf []byte, start pos) ([]byte, pos) { - var i int - p := start -Byte: - for i = 0; i < len(buf); i++ { - switch buf[i] { - case ' ': - p.Pos.Byte++ - p.Pos.Column++ - case '\n': - p.Pos.Byte++ - p.Pos.Column = 1 - p.Pos.Line++ - case '\r': - // For the purpose of line/column counting we consider a - // carriage return to take up no space, assuming that it will - // be paired up with a newline (on Windows, for example) that - // will account for both of them. - p.Pos.Byte++ - case '\t': - // We arbitrarily count a tab as if it were two spaces, because - // we need to choose _some_ number here. This means any system - // that renders code on-screen with markers must itself treat - // tabs as a pair of spaces for rendering purposes, or instead - // use the byte offset and back into its own column position. - p.Pos.Byte++ - p.Pos.Column += 2 - default: - break Byte - } - } - return buf[i:], p -} - -type pos struct { - Filename string - Pos hcl.Pos -} - -func (p *pos) Range(byteLen, charLen int) hcl.Range { - start := p.Pos - end := p.Pos - end.Byte += byteLen - end.Column += charLen - return hcl.Range{ - Filename: p.Filename, - Start: start, - End: end, - } -} - -func posRange(start, end pos) hcl.Range { - return hcl.Range{ - Filename: start.Filename, - Start: start.Pos, - End: end.Pos, - } -} - -func (t token) GoString() string { - return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) -} - -func isAlphabetical(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/spec.md b/vendor/github.com/hashicorp/hcl/v2/json/spec.md deleted file mode 100644 index dac5729d..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/spec.md +++ /dev/null @@ -1,405 +0,0 @@ -# HCL JSON Syntax Specification - -This is the specification for the JSON serialization for hcl. HCL is a system -for defining configuration languages for applications. The HCL information -model is designed to support multiple concrete syntaxes for configuration, -and this JSON-based format complements [the native syntax](../hclsyntax/spec.md) -by being easy to machine-generate, whereas the native syntax is oriented -towards human authoring and maintenance - -This syntax is defined in terms of JSON as defined in -[RFC7159](https://tools.ietf.org/html/rfc7159). 
As such it inherits the JSON -grammar as-is, and merely defines a specific methodology for interpreting -JSON constructs into HCL structural elements and expressions. - -This mapping is defined such that valid JSON-serialized HCL input can be -_produced_ using standard JSON implementations in various programming languages. -_Parsing_ such JSON has some additional constraints not beyond what is normally -supported by JSON parsers, so a specialized parser may be required that -is able to: - -- Preserve the relative ordering of properties defined in an object. -- Preserve multiple definitions of the same property name. -- Preserve numeric values to the precision required by the number type - in [the HCL syntax-agnostic information model](../spec.md). -- Retain source location information for parsed tokens/constructs in order - to produce good error messages. - -## Structural Elements - -[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an -abstract container for attribute definitions and child blocks. A body is -represented in JSON as either a single JSON object or a JSON array of objects. - -Body processing is in terms of JSON object properties, visited in the order -they appear in the input. Where a body is represented by a single JSON object, -the properties of that object are visited in order. Where a body is -represented by a JSON array, each of its elements are visited in order and -each element has its properties visited in order. If any element of the array -is not a JSON object then the input is erroneous. - -When a body is being processed in the _dynamic attributes_ mode, the allowance -of a JSON array in the previous paragraph does not apply and instead a single -JSON object is always required. - -As defined in the language-agnostic model, body processing is in terms -of a schema which provides context for interpreting the body's content. For -JSON bodies, the schema is crucial to allow differentiation of attribute -definitions and block definitions, both of which are represented via object -properties. - -The special property name `"//"`, when used in an object representing a HCL -body, is parsed and ignored. A property with this name can be used to -include human-readable comments. (This special property name is _not_ -processed in this way for any _other_ HCL constructs that are represented as -JSON objects.) - -### Attributes - -Where the given schema describes an attribute with a given name, the object -property with the matching name — if present — serves as the attribute's -definition. - -When a body is being processed in the _dynamic attributes_ mode, each object -property serves as an attribute definition for the attribute whose name -matches the property name. - -The value of an attribute definition property is interpreted as an _expression_, -as described in a later section. - -Given a schema that calls for an attribute named "foo", a JSON object like -the following provides a definition for that attribute: - -```json -{ - "foo": "bar baz" -} -``` - -### Blocks - -Where the given schema describes a block with a given type name, each object -property with the matching name serves as a definition of zero or more blocks -of that type. - -Processing of child blocks is in terms of nested JSON objects and arrays. -If the schema defines one or more _labels_ for the block type, a nested JSON -object or JSON array of objects is required for each labelling level. 
These -are flattened to a single ordered sequence of object properties using the -same algorithm as for body content as defined above. Each object property -serves as a label value at the corresponding level. - -After any labelling levels, the next nested value is either a JSON object -representing a single block body, or a JSON array of JSON objects that each -represent a single block body. Use of an array accommodates the definition -of multiple blocks that have identical type and labels. - -Given a schema that calls for a block type named "foo" with no labels, the -following JSON objects are all valid definitions of zero or more blocks of this -type: - -```json -{ - "foo": { - "child_attr": "baz" - } -} -``` - -```json -{ - "foo": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] -} -``` - -```json -{ - "foo": [] -} -``` - -The first of these defines a single child block of type "foo". The second -defines _two_ such blocks. The final example shows a degenerate definition -of zero blocks, though generators should prefer to omit the property entirely -in this scenario. - -Given a schema that calls for a block type named "foo" with _two_ labels, the -extra label levels must be represented as objects or arrays of objects as in -the following examples: - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": { - "child_attr": "baz" - } - } - } -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "boz": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -```json -{ - "foo": [ - { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - } - }, - { - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } - ] -} -``` - -```json -{ - "foo": { - "bar": { - "baz": { - "child_attr": "baz" - }, - "boz": { - "child_attr": "baz" - } - }, - "bar": { - "baz": [ - { - "child_attr": "baz" - }, - { - "child_attr": "boz" - } - ] - } - } -} -``` - -Arrays can be introduced at either the label definition or block body -definition levels to define multiple definitions of the same block type -or labels while preserving order. - -A JSON HCL parser _must_ support duplicate definitions of the same property -name within a single object, preserving all of them and the relative ordering -between them. The array-based forms are also required so that JSON HCL -configurations can be produced with JSON producing libraries that are not -able to preserve property definition order and multiple definitions of -the same property. - -## Expressions - -JSON lacks a native expression syntax, so the HCL JSON syntax instead defines -a mapping for each of the JSON value types, including a special mapping for -strings that allows optional use of arbitrary expressions. - -### Objects - -When interpreted as an expression, a JSON object represents a value of a HCL -object type. - -Each property of the JSON object represents an attribute of the HCL object type. -The property name string given in the JSON input is interpreted as a string -expression as described below, and its result is converted to string as defined -by the syntax-agnostic information model. If such a conversion is not possible, -an error is produced and evaluation fails. 
- -An instance of the constructed object type is then created, whose values -are interpreted by again recursively applying the mapping rules defined in -this section to each of the property values. - -If any evaluated property name strings produce null values, an error is -produced and evaluation fails. If any produce _unknown_ values, the _entire -object's_ result is an unknown value of the dynamic pseudo-type, signalling -that the type of the object cannot be determined. - -It is an error to define the same property name multiple times within a single -JSON object interpreted as an expression. In full expression mode, this -constraint applies to the name expression results after conversion to string, -rather than the raw string that may contain interpolation expressions. - -### Arrays - -When interpreted as an expression, a JSON array represents a value of a HCL -tuple type. - -Each element of the JSON array represents an element of the HCL tuple type. -The tuple type is constructed by enumerating the JSON array elements, creating -for each an element whose type is the result of recursively applying the -expression mapping rules. Correspondence is preserved between the array element -indices and the tuple element indices. - -An instance of the constructed tuple type is then created, whose values are -interpreted by again recursively applying the mapping rules defined in this -section. - -### Numbers - -When interpreted as an expression, a JSON number represents a HCL number value. - -HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must -be able to translate exactly the value given to a number of corresponding -precision, within the constraints set by the HCL syntax-agnostic information -model. - -In practice, off-the-shelf JSON serializers often do not support customizing the -processing of numbers, and instead force processing as 32-bit or 64-bit -floating point values. - -A _producer_ of JSON HCL that uses such a serializer can provide numeric values -as JSON strings where they have precision too great for representation in the -serializer's chosen numeric type in situations where the result will be -converted to number (using the standard conversion rules) by a calling -application. - -Alternatively, for expressions that are evaluated in full expression mode an -embedded template interpolation can be used to faithfully represent a number, -such as `"${1e150}"`, which will then be evaluated by the underlying HCL native -syntax expression evaluator. - -### Boolean Values - -The JSON boolean values `true` and `false`, when interpreted as expressions, -represent the corresponding HCL boolean values. - -### The Null Value - -The JSON value `null`, when interpreted as an expression, represents a -HCL null value of the dynamic pseudo-type. - -### Strings - -When interpreted as an expression, a JSON string may be interpreted in one of -two ways depending on the evaluation mode. - -If evaluating in literal-only mode (as defined by the syntax-agnostic -information model) the literal string is intepreted directly as a HCL string -value, by directly using the exact sequence of unicode characters represented. -Template interpolations and directives MUST NOT be processed in this mode, -allowing any characters that appear as introduction sequences to pass through -literally: - -```json -"Hello world! Template sequences like ${ are not intepreted here." 
-``` - -When evaluating in full expression mode (again, as defined by the syntax- -agnostic information model) the literal string is instead interpreted as a -_standalone template_ in the HCL Native Syntax. The expression evaluation -result is then the direct result of evaluating that template with the current -variable scope and function table. - -```json -"Hello, ${name}! Template sequences are interpreted in full expression mode." -``` - -In particular the _Template Interpolation Unwrapping_ requirement from the -HCL native syntax specification must be implemented, allowing the use of -single-interpolation templates to represent expressions that would not -otherwise be representable in JSON, such as the following example where -the result must be a number, rather than a string representation of a number: - -```json -"${ a + b }" -``` - -## Static Analysis - -The HCL static analysis operations are implemented for JSON values that -represent expressions, as described in the following sections. - -Due to the limited expressive power of the JSON syntax alone, use of these -static analyses functions rather than normal expression evaluation is used -as additional context for how a JSON value is to be interpreted, which means -that static analyses can result in a different interpretation of a given -expression than normal evaluation. - -### Static List - -An expression interpreted as a static list must be a JSON array. Each of the -values in the array is interpreted as an expression and returned. - -### Static Map - -An expression interpreted as a static map must be a JSON object. Each of the -key/value pairs in the object is presented as a pair of expressions. Since -object property names are always strings, evaluating the key expression with -a non-`nil` evaluation context will evaluate any template sequences given -in the property name. - -### Static Call - -An expression interpreted as a static call must be a string. The content of -the string is interpreted as a native syntax expression (not a _template_, -unlike normal evaluation) and then the static call analysis is delegated to -that expression. - -If the original expression is not a string or its contents cannot be parsed -as a native syntax expression then static call analysis is not supported. - -### Static Traversal - -An expression interpreted as a static traversal must be a string. The content -of the string is interpreted as a native syntax expression (not a _template_, -unlike normal evaluation) and then static traversal analysis is delegated -to that expression. - -If the original expression is not a string or its contents cannot be parsed -as a native syntax expression then static call analysis is not supported. diff --git a/vendor/github.com/hashicorp/hcl/v2/json/structure.go b/vendor/github.com/hashicorp/hcl/v2/json/structure.go deleted file mode 100644 index 76c9d739..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/structure.go +++ /dev/null @@ -1,637 +0,0 @@ -package json - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// body is the implementation of "Body" used for files processed with the JSON -// parser. -type body struct { - val node - - // If non-nil, the keys of this map cause the corresponding attributes to - // be treated as non-existing. This is used when Body.PartialContent is - // called, to produce the "remaining content" Body. 
- hiddenAttrs map[string]struct{} -} - -// expression is the implementation of "Expression" used for files processed -// with the JSON parser. -type expression struct { - src node -} - -func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, newBody, diags := b.PartialContent(schema) - - hiddenAttrs := newBody.(*body).hiddenAttrs - - var nameSuggestions []string - for _, attrS := range schema.Attributes { - if _, ok := hiddenAttrs[attrS.Name]; !ok { - // Only suggest an attribute name if we didn't use it already. - nameSuggestions = append(nameSuggestions, attrS.Name) - } - } - for _, blockS := range schema.Blocks { - // Blocks can appear multiple times, so we'll suggest their type - // names regardless of whether they've already been used. - nameSuggestions = append(nameSuggestions, blockS.Type) - } - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...) - - for _, attr := range jsonAttrs { - k := attr.Name - if k == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, ok := hiddenAttrs[k]; !ok { - suggestion := nameSuggestion(k, nameSuggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Extraneous JSON object property", - Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), - Subject: &attr.NameRange, - Context: attr.Range().Ptr(), - }) - } - } - - return content, diags -} - -func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - - jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) - diags = append(diags, attrDiags...) - - usedNames := map[string]struct{}{} - if b.hiddenAttrs != nil { - for k := range b.hiddenAttrs { - usedNames[k] = struct{}{} - } - } - - content := &hcl.BodyContent{ - Attributes: map[string]*hcl.Attribute{}, - Blocks: nil, - - MissingItemRange: b.MissingItemRange(), - } - - // Create some more convenient data structures for our work below. - attrSchemas := map[string]hcl.AttributeSchema{} - blockSchemas := map[string]hcl.BlockHeaderSchema{} - for _, attrS := range schema.Attributes { - attrSchemas[attrS.Name] = attrS - } - for _, blockS := range schema.Blocks { - blockSchemas[blockS.Type] = blockS - } - - for _, jsonAttr := range jsonAttrs { - attrName := jsonAttr.Name - if _, used := b.hiddenAttrs[attrName]; used { - continue - } - - if attrS, defined := attrSchemas[attrName]; defined { - if existing, exists := content.Attributes[attrName]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate argument", - Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), - Subject: &jsonAttr.NameRange, - Context: jsonAttr.Range().Ptr(), - }) - continue - } - - content.Attributes[attrS.Name] = &hcl.Attribute{ - Name: attrS.Name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - usedNames[attrName] = struct{}{} - - } else if blockS, defined := blockSchemas[attrName]; defined { - bv := jsonAttr.Value - blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) - diags = append(diags, blockDiags...) 
- usedNames[attrName] = struct{}{} - } - - // We ignore anything that isn't defined because that's the - // PartialContent contract. The Content method will catch leftovers. - } - - // Make sure we got all the required attributes. - for _, attrS := range schema.Attributes { - if !attrS.Required { - continue - } - if _, defined := content.Attributes[attrS.Name]; !defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required argument", - Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), - Subject: b.MissingItemRange().Ptr(), - }) - } - } - - unusedBody := &body{ - val: b.val, - hiddenAttrs: usedNames, - } - - return content, unusedBody, diags -} - -// JustAttributes for JSON bodies interprets all properties of the wrapped -// JSON object as attributes and returns them. -func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - attrs := make(map[string]*hcl.Attribute) - - obj, ok := b.val.(*objectVal) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "A JSON object is required here, setting the arguments for this block.", - Subject: b.val.StartRange().Ptr(), - }) - return attrs, diags - } - - for _, jsonAttr := range obj.Attrs { - name := jsonAttr.Name - if name == "//" { - // Ignore "//" keys in objects representing bodies, to allow - // their use as comments. - continue - } - - if _, hidden := b.hiddenAttrs[name]; hidden { - continue - } - - if existing, exists := attrs[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate attribute definition", - Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range), - Subject: &jsonAttr.NameRange, - }) - continue - } - - attrs[name] = &hcl.Attribute{ - Name: name, - Expr: &expression{src: jsonAttr.Value}, - Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), - NameRange: jsonAttr.NameRange, - } - } - - // No diagnostics possible here, since the parser already took care of - // finding duplicates and every JSON value can be a valid attribute value. - return attrs, diags -} - -func (b *body) MissingItemRange() hcl.Range { - switch tv := b.val.(type) { - case *objectVal: - return tv.CloseRange - case *arrayVal: - return tv.OpenRange - default: - // Should not happen in correct operation, but might show up if the - // input is invalid and we are producing partial results. - return tv.StartRange() - } -} - -func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { - if len(labelsLeft) > 0 { - labelName := labelsLeft[0] - jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) - diags = append(diags, attrDiags...) 
- - if len(jsonAttrs) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing block label", - Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), - Subject: v.StartRange().Ptr(), - }) - return - } - labelsUsed := append(labelsUsed, "") - labelRanges := append(labelRanges, hcl.Range{}) - for _, p := range jsonAttrs { - pk := p.Name - labelsUsed[len(labelsUsed)-1] = pk - labelRanges[len(labelRanges)-1] = p.NameRange - diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) - } - return - } - - // By the time we get here, we've peeled off all the labels and we're ready - // to deal with the block's actual content. - - // need to copy the label slices because their underlying arrays will - // continue to be mutated after we return. - labels := make([]string, len(labelsUsed)) - copy(labels, labelsUsed) - labelR := make([]hcl.Range, len(labelRanges)) - copy(labelR, labelRanges) - - switch tv := v.(type) { - case *nullVal: - // There is no block content, e.g the value is null. - return - case *objectVal: - // Single instance of the block - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: tv, - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - case *arrayVal: - // Multiple instances of the block - for _, av := range tv.Values { - *blocks = append(*blocks, &hcl.Block{ - Type: typeName, - Labels: labels, - Body: &body{ - val: av, // might be mistyped; we'll find out when content is requested for this body - }, - - DefRange: tv.OpenRange, - TypeRange: *typeRange, - LabelRanges: labelR, - }) - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), - Subject: v.StartRange().Ptr(), - }) - } - return -} - -// collectDeepAttrs takes either a single object or an array of objects and -// flattens it into a list of object attributes, collecting attributes from -// all of the objects in a given array. -// -// Ordering is preserved, so a list of objects that each have one property -// will result in those properties being returned in the same order as the -// objects appeared in the array. -// -// This is appropriate for use only for objects representing bodies or labels -// within a block. -// -// The labelName argument, if non-null, is used to tailor returned error -// messages to refer to block labels rather than attributes and child blocks. -// It has no other effect. -func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) { - var diags hcl.Diagnostics - var attrs []*objectAttr - - switch tv := v.(type) { - case *nullVal: - // If a value is null, then we don't return any attributes or return an error. - - case *objectVal: - attrs = append(attrs, tv.Attrs...) - - case *arrayVal: - for _, ev := range tv.Values { - switch tev := ev.(type) { - case *objectVal: - attrs = append(attrs, tev.Attrs...) 
- default: - if labelName != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName), - Subject: ev.StartRange().Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "A JSON object is required here, to define arguments and child blocks.", - Subject: ev.StartRange().Ptr(), - }) - } - } - } - - default: - if labelName != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName), - Subject: v.StartRange().Ptr(), - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect JSON value type", - Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.", - Subject: v.StartRange().Ptr(), - }) - } - } - - return attrs, diags -} - -func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - switch v := e.src.(type) { - case *stringVal: - if ctx != nil { - // Parse string contents as a HCL native language expression. - // We only do this if we have a context, so passing a nil context - // is how the caller specifies that interpolations are not allowed - // and that the string should just be returned verbatim. - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, evalDiags := expr.Value(ctx) - diags = append(diags, evalDiags...) - return val, diags - } - - return cty.StringVal(v.Value), nil - case *numberVal: - return cty.NumberVal(v.Value), nil - case *booleanVal: - return cty.BoolVal(v.Value), nil - case *arrayVal: - var diags hcl.Diagnostics - vals := []cty.Value{} - for _, jsonVal := range v.Values { - val, valDiags := (&expression{src: jsonVal}).Value(ctx) - vals = append(vals, val) - diags = append(diags, valDiags...) - } - return cty.TupleVal(vals), diags - case *objectVal: - var diags hcl.Diagnostics - attrs := map[string]cty.Value{} - attrRanges := map[string]hcl.Range{} - known := true - for _, jsonAttr := range v.Attrs { - // In this one context we allow keys to contain interpolation - // expressions too, assuming we're evaluating in interpolation - // mode. This achieves parity with the native syntax where - // object expressions can have dynamic keys, while block contents - // may not. - name, nameDiags := (&expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}).Value(ctx) - valExpr := &expression{src: jsonAttr.Value} - val, valDiags := valExpr.Value(ctx) - diags = append(diags, nameDiags...) - diags = append(diags, valDiags...) 
- - var err error - name, err = convert.Convert(name, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if name.IsNull() { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid object key expression", - Detail: "Cannot use null value as an object key.", - Subject: &jsonAttr.NameRange, - Expression: valExpr, - EvalContext: ctx, - }) - continue - } - if !name.IsKnown() { - // This is a bit of a weird case, since our usual rules require - // us to tolerate unknowns and just represent the result as - // best we can but if we don't know the key then we can't - // know the type of our object at all, and thus we must turn - // the whole thing into cty.DynamicVal. This is consistent with - // how this situation is handled in the native syntax. - // We'll keep iterating so we can collect other errors in - // subsequent attributes. - known = false - continue - } - nameStr := name.AsString() - if _, defined := attrs[nameStr]; defined { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate object attribute", - Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), - Subject: &jsonAttr.NameRange, - Expression: e, - EvalContext: ctx, - }) - continue - } - attrs[nameStr] = val - attrRanges[nameStr] = jsonAttr.NameRange - } - if !known { - // We encountered an unknown key somewhere along the way, so - // we can't know what our type will eventually be. - return cty.DynamicVal, diags - } - return cty.ObjectVal(attrs), diags - case *nullVal: - return cty.NullVal(cty.DynamicPseudoType), nil - default: - // Default to DynamicVal so that ASTs containing invalid nodes can - // still be partially-evaluated. - return cty.DynamicVal, nil - } -} - -func (e *expression) Variables() []hcl.Traversal { - var vars []hcl.Traversal - - switch v := e.src.(type) { - case *stringVal: - templateSrc := v.Value - expr, diags := hclsyntax.ParseTemplate( - []byte(templateSrc), - v.SrcRange.Filename, - - // This won't produce _exactly_ the right result, since - // the hclsyntax parser can't "see" any escapes we removed - // while parsing JSON, but it's better than nothing. - hcl.Pos{ - Line: v.SrcRange.Start.Line, - - // skip over the opening quote mark - Byte: v.SrcRange.Start.Byte + 1, - Column: v.SrcRange.Start.Column + 1, - }, - ) - if diags.HasErrors() { - return vars - } - return expr.Variables() - - case *arrayVal: - for _, jsonVal := range v.Values { - vars = append(vars, (&expression{src: jsonVal}).Variables()...) - } - case *objectVal: - for _, jsonAttr := range v.Attrs { - keyExpr := &stringVal{ // we're going to treat key as an expression in this context - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - } - vars = append(vars, (&expression{src: keyExpr}).Variables()...) - vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) - } - } - - return vars -} - -func (e *expression) Range() hcl.Range { - return e.src.Range() -} - -func (e *expression) StartRange() hcl.Range { - return e.src.StartRange() -} - -// Implementation for hcl.AbsTraversalForExpr. 
-func (e *expression) AsTraversal() hcl.Traversal { - // In JSON-based syntax a traversal is given as a string containing - // traversal syntax as defined by hclsyntax.ParseTraversalAbs. - - switch v := e.src.(type) { - case *stringVal: - traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - return traversal - default: - return nil - } -} - -// Implementation for hcl.ExprCall. -func (e *expression) ExprCall() *hcl.StaticCall { - // In JSON-based syntax a static call is given as a string containing - // an expression in the native syntax that also supports ExprCall. - - switch v := e.src.(type) { - case *stringVal: - expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) - if diags.HasErrors() { - return nil - } - - call, diags := hcl.ExprCall(expr) - if diags.HasErrors() { - return nil - } - - return call - default: - return nil - } -} - -// Implementation for hcl.ExprList. -func (e *expression) ExprList() []hcl.Expression { - switch v := e.src.(type) { - case *arrayVal: - ret := make([]hcl.Expression, len(v.Values)) - for i, node := range v.Values { - ret[i] = &expression{src: node} - } - return ret - default: - return nil - } -} - -// Implementation for hcl.ExprMap. -func (e *expression) ExprMap() []hcl.KeyValuePair { - switch v := e.src.(type) { - case *objectVal: - ret := make([]hcl.KeyValuePair, len(v.Attrs)) - for i, jsonAttr := range v.Attrs { - ret[i] = hcl.KeyValuePair{ - Key: &expression{src: &stringVal{ - Value: jsonAttr.Name, - SrcRange: jsonAttr.NameRange, - }}, - Value: &expression{src: jsonAttr.Value}, - } - } - return ret - default: - return nil - } -} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go deleted file mode 100644 index bbcce5b3..00000000 --- a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. 
- -package json - -import "strconv" - -const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" - -var _tokenType_map = map[tokenType]string{ - 0: _tokenType_name[0:12], - 44: _tokenType_name[12:22], - 58: _tokenType_name[22:32], - 61: _tokenType_name[32:43], - 75: _tokenType_name[43:55], - 78: _tokenType_name[55:66], - 83: _tokenType_name[66:77], - 91: _tokenType_name[77:88], - 93: _tokenType_name[88:99], - 123: _tokenType_name[99:110], - 125: _tokenType_name[110:121], - 9220: _tokenType_name[121:129], -} - -func (i tokenType) String() string { - if str, ok := _tokenType_map[i]; ok { - return str - } - return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/github.com/hashicorp/hil/.gitignore b/vendor/github.com/hashicorp/hil/.gitignore deleted file mode 100644 index 9d6e5df3..00000000 --- a/vendor/github.com/hashicorp/hil/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.DS_Store -.idea -*.iml diff --git a/vendor/github.com/hashicorp/hil/.travis.yml b/vendor/github.com/hashicorp/hil/.travis.yml deleted file mode 100644 index a7854442..00000000 --- a/vendor/github.com/hashicorp/hil/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -sudo: false -language: go -go: 1.7 diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE deleted file mode 100644 index 82b4de97..00000000 --- a/vendor/github.com/hashicorp/hil/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md deleted file mode 100644 index 186ed251..00000000 --- a/vendor/github.com/hashicorp/hil/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# HIL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil) - -HIL (HashiCorp Interpolation Language) is a lightweight embedded language used -primarily for configuration interpolation. The goal of HIL is to make a simple -language for interpolations in the various configurations of HashiCorp tools. - -HIL is built to interpolate any string, but is in use by HashiCorp primarily -with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any -way for use with HIL. - -HIL isn't meant to be a general purpose language. It was built for basic -configuration interpolations. Therefore, you can't currently write functions, -have conditionals, set intermediary variables, etc. within HIL itself. It is -possible some of these may be added later but the right use case must exist. - -## Why? - -Many of our tools have support for something similar to templates, but -within the configuration itself. The most prominent requirement was in -[Terraform](https://github.com/hashicorp/terraform) where we wanted the -configuration to be able to reference values from elsewhere in the -configuration. Example: - - foo = "hi ${var.world}" - -We originally used a full templating language for this, but found it -was too heavy weight. Additionally, many full languages required bindings -to C (and thus the usage of cgo) which we try to avoid to make cross-compilation -easier. We then moved to very basic regular expression based -string replacement, but found the need for basic arithmetic and function -calls resulting in overly complex regular expressions. - -Ultimately, we wrote our own mini-language within Terraform itself. As -we built other projects such as [Nomad](https://nomadproject.io) and -[Otto](https://ottoproject.io), the need for basic interpolations arose -again. - -Thus HIL was born. It is extracted from Terraform, cleaned up, and -better tested for general purpose use. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammer is listed here. - -Code begins within `${` and `}`. Outside of this, text is treated -literally. For example, `foo` is a valid HIL program that is just the -string "foo", but `foo ${bar}` is an HIL program that is the string "foo " -concatened with the value of `bar`. For the remainder of the syntax -docs, we'll assume you're within `${}`. - - * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example - identifiers: `foo`, `var.foo`, `foo-bar`. - - * Strings are double quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Numbers are assumed to be base 10. If you prefix a number with 0x, - it is treated as a hexadecimal. If it is prefixed with 0, it is - treated as an octal. Numbers can be in scientific notation: "1e10". - - * Unary `-` can be used for negative numbers. 
Example: `-10` or `-0.2` - - * Boolean values: `true`, `false` - - * The following arithmetic operations are allowed: +, -, *, /, %. - - * Function calls are in the form of `name(arg1, arg2, ...)`. Example: - `add(1, 5)`. Arguments can be any valid HIL expression, example: - `add(1, var.foo)` or even nested function calls: - `add(1, get("some value"))`. - - * Within strings, further interpolations can be opened with `${}`. - Example: `"Hello ${nested}"`. A full example including the - original `${}` (remember this list assumes were inside of one - already) could be: `foo ${func("hello ${var.foo}")}`. - -## Language Changes - -We've used this mini-language in Terraform for years. For backwards compatibility -reasons, we're unlikely to make an incompatible change to the language but -we're not currently making that promise, either. - -The internal API of this project may very well change as we evolve it -to work with more of our projects. We recommend using some sort of dependency -management solution with this package. - -## Future Changes - -The following changes are already planned to be made at some point: - - * Richer types: lists, maps, etc. - - * Convert to a more standard Go parser structure similar to HCL. This - will improve our error messaging as well as allow us to have automatic - formatting. - - * Allow interpolations to result in more types than just a string. While - within the interpolation basic types are honored, the result is always - a string. diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml deleted file mode 100644 index feaf7a34..00000000 --- a/vendor/github.com/hashicorp/hil/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "build-{branch}-{build}" -image: Visual Studio 2015 -clone_folder: c:\gopath\src\github.com\hashicorp\hil -environment: - GOPATH: c:\gopath -init: - - git config --global core.autocrlf true -install: -- cmd: >- - echo %Path% - - go version - - go env - - go get -d -v -t ./... -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go deleted file mode 100644 index 94dc24f8..00000000 --- a/vendor/github.com/hashicorp/hil/ast/arithmetic.go +++ /dev/null @@ -1,43 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -// Arithmetic represents a node where the result is arithmetic of -// two or more operands in the order given. -type Arithmetic struct { - Op ArithmeticOp - Exprs []Node - Posx Pos -} - -func (n *Arithmetic) Accept(v Visitor) Node { - for i, expr := range n.Exprs { - n.Exprs[i] = expr.Accept(v) - } - - return v(n) -} - -func (n *Arithmetic) Pos() Pos { - return n.Posx -} - -func (n *Arithmetic) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *Arithmetic) String() string { - var b bytes.Buffer - for _, expr := range n.Exprs { - b.WriteString(fmt.Sprintf("%s", expr)) - } - - return b.String() -} - -func (n *Arithmetic) Type(Scope) (Type, error) { - return TypeInt, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go deleted file mode 100644 index 18880c60..00000000 --- a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go +++ /dev/null @@ -1,24 +0,0 @@ -package ast - -// ArithmeticOp is the operation to use for the math. 
-type ArithmeticOp int - -const ( - ArithmeticOpInvalid ArithmeticOp = 0 - - ArithmeticOpAdd ArithmeticOp = iota - ArithmeticOpSub - ArithmeticOpMul - ArithmeticOpDiv - ArithmeticOpMod - - ArithmeticOpLogicalAnd - ArithmeticOpLogicalOr - - ArithmeticOpEqual - ArithmeticOpNotEqual - ArithmeticOpLessThan - ArithmeticOpLessThanOrEqual - ArithmeticOpGreaterThan - ArithmeticOpGreaterThanOrEqual -) diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go deleted file mode 100644 index c6350f8b..00000000 --- a/vendor/github.com/hashicorp/hil/ast/ast.go +++ /dev/null @@ -1,99 +0,0 @@ -package ast - -import ( - "fmt" -) - -// Node is the interface that all AST nodes must implement. -type Node interface { - // Accept is called to dispatch to the visitors. It must return the - // resulting Node (which might be different in an AST transform). - Accept(Visitor) Node - - // Pos returns the position of this node in some source. - Pos() Pos - - // Type returns the type of this node for the given context. - Type(Scope) (Type, error) -} - -// Pos is the starting position of an AST node -type Pos struct { - Column, Line int // Column/Line number, starting at 1 - Filename string // Optional source filename, if known -} - -func (p Pos) String() string { - if p.Filename == "" { - return fmt.Sprintf("%d:%d", p.Line, p.Column) - } else { - return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column) - } -} - -// InitPos is an initiaial position value. This should be used as -// the starting position (presets the column and line to 1). -var InitPos = Pos{Column: 1, Line: 1} - -// Visitors are just implementations of this function. -// -// The function must return the Node to replace this node with. "nil" is -// _not_ a valid return value. If there is no replacement, the original node -// should be returned. We build this replacement directly into the visitor -// pattern since AST transformations are a common and useful tool and -// building it into the AST itself makes it required for future Node -// implementations and very easy to do. -// -// Note that this isn't a true implementation of the visitor pattern, which -// generally requires proper type dispatch on the function. However, -// implementing this basic visitor pattern style is still very useful even -// if you have to type switch. -type Visitor func(Node) Node - -//go:generate stringer -type=Type - -// Type is the type of any value. -type Type uint32 - -const ( - TypeInvalid Type = 0 - TypeAny Type = 1 << iota - TypeBool - TypeString - TypeInt - TypeFloat - TypeList - TypeMap - - // This is a special type used by Terraform to mark "unknown" values. - // It is impossible for this type to be introduced into your HIL programs - // unless you explicitly set a variable to this value. In that case, - // any operation including the variable will return "TypeUnknown" as the - // type. 
- TypeUnknown -) - -func (t Type) Printable() string { - switch t { - case TypeInvalid: - return "invalid type" - case TypeAny: - return "any type" - case TypeBool: - return "type bool" - case TypeString: - return "type string" - case TypeInt: - return "type int" - case TypeFloat: - return "type float" - case TypeList: - return "type list" - case TypeMap: - return "type map" - case TypeUnknown: - return "type unknown" - default: - return "unknown type" - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go deleted file mode 100644 index 05570110..00000000 --- a/vendor/github.com/hashicorp/hil/ast/call.go +++ /dev/null @@ -1,47 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Call represents a function call. -type Call struct { - Func string - Args []Node - Posx Pos -} - -func (n *Call) Accept(v Visitor) Node { - for i, a := range n.Args { - n.Args[i] = a.Accept(v) - } - - return v(n) -} - -func (n *Call) Pos() Pos { - return n.Posx -} - -func (n *Call) String() string { - args := make([]string, len(n.Args)) - for i, arg := range n.Args { - args[i] = fmt.Sprintf("%s", arg) - } - - return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", ")) -} - -func (n *Call) Type(s Scope) (Type, error) { - f, ok := s.LookupFunc(n.Func) - if !ok { - return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func) - } - - return f.ReturnType, nil -} - -func (n *Call) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go deleted file mode 100644 index be48f89d..00000000 --- a/vendor/github.com/hashicorp/hil/ast/conditional.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -type Conditional struct { - CondExpr Node - TrueExpr Node - FalseExpr Node - Posx Pos -} - -// Accept passes the given visitor to the child nodes in this order: -// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor. -func (n *Conditional) Accept(v Visitor) Node { - n.CondExpr = n.CondExpr.Accept(v) - n.TrueExpr = n.TrueExpr.Accept(v) - n.FalseExpr = n.FalseExpr.Accept(v) - - return v(n) -} - -func (n *Conditional) Pos() Pos { - return n.Posx -} - -func (n *Conditional) Type(Scope) (Type, error) { - // This is not actually a useful value; the type checker ignores - // this function when analyzing conditionals, just as with Arithmetic. 
- return TypeInt, nil -} - -func (n *Conditional) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go deleted file mode 100644 index 860c25fd..00000000 --- a/vendor/github.com/hashicorp/hil/ast/index.go +++ /dev/null @@ -1,76 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Index represents an indexing operation into another data structure -type Index struct { - Target Node - Key Node - Posx Pos -} - -func (n *Index) Accept(v Visitor) Node { - n.Target = n.Target.Accept(v) - n.Key = n.Key.Accept(v) - return v(n) -} - -func (n *Index) Pos() Pos { - return n.Posx -} - -func (n *Index) String() string { - return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key) -} - -func (n *Index) Type(s Scope) (Type, error) { - variableAccess, ok := n.Target.(*VariableAccess) - if !ok { - return TypeInvalid, fmt.Errorf("target is not a variable") - } - - variable, ok := s.LookupVar(variableAccess.Name) - if !ok { - return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name) - } - - switch variable.Type { - case TypeList: - return n.typeList(variable, variableAccess.Name) - case TypeMap: - return n.typeMap(variable, variableAccess.Name) - default: - return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type) - } -} - -func (n *Index) typeList(variable Variable, variableName string) (Type, error) { - // We assume type checking has already determined that this is a list - list := variable.Value.([]Variable) - - return VariableListElementTypesAreHomogenous(variableName, list) -} - -func (n *Index) typeMap(variable Variable, variableName string) (Type, error) { - // We assume type checking has already determined that this is a map - vmap := variable.Value.(map[string]Variable) - - return VariableMapValueTypesAreHomogenous(variableName, vmap) -} - -func reportTypes(typesFound map[Type]struct{}) string { - stringTypes := make([]string, len(typesFound)) - i := 0 - for k, _ := range typesFound { - stringTypes[0] = k.String() - i++ - } - return strings.Join(stringTypes, ", ") -} - -func (n *Index) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go deleted file mode 100644 index da6014fe..00000000 --- a/vendor/github.com/hashicorp/hil/ast/literal.go +++ /dev/null @@ -1,88 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" -) - -// LiteralNode represents a single literal value, such as "foo" or -// 42 or 3.14159. Based on the Type, the Value can be safely cast. -type LiteralNode struct { - Value interface{} - Typex Type - Posx Pos -} - -// NewLiteralNode returns a new literal node representing the given -// literal Go value, which must correspond to one of the primitive types -// supported by HIL. Lists and maps cannot currently be constructed via -// this function. -// -// If an inappropriately-typed value is provided, this function will -// return an error. The main intended use of this function is to produce -// "synthetic" literals from constants in code, where the value type is -// well known at compile time. To easily store these in global variables, -// see also MustNewLiteralNode. 
-func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) { - goType := reflect.TypeOf(value) - var hilType Type - - switch goType.Kind() { - case reflect.Bool: - hilType = TypeBool - case reflect.Int: - hilType = TypeInt - case reflect.Float64: - hilType = TypeFloat - case reflect.String: - hilType = TypeString - default: - return nil, fmt.Errorf("unsupported literal node type: %T", value) - } - - return &LiteralNode{ - Value: value, - Typex: hilType, - Posx: pos, - }, nil -} - -// MustNewLiteralNode wraps NewLiteralNode and panics if an error is -// returned, thus allowing valid literal nodes to be easily assigned to -// global variables. -func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode { - node, err := NewLiteralNode(value, pos) - if err != nil { - panic(err) - } - return node -} - -func (n *LiteralNode) Accept(v Visitor) Node { - return v(n) -} - -func (n *LiteralNode) Pos() Pos { - return n.Posx -} - -func (n *LiteralNode) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *LiteralNode) String() string { - return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value) -} - -func (n *LiteralNode) Type(Scope) (Type, error) { - return n.Typex, nil -} - -// IsUnknown returns true either if the node's value is itself unknown -// of if it is a collection containing any unknown elements, deeply. -func (n *LiteralNode) IsUnknown() bool { - return IsUnknown(Variable{ - Type: n.Typex, - Value: n.Value, - }) -} diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go deleted file mode 100644 index 1e27f970..00000000 --- a/vendor/github.com/hashicorp/hil/ast/output.go +++ /dev/null @@ -1,78 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -// Output represents the root node of all interpolation evaluations. If the -// output only has one expression which is either a TypeList or TypeMap, the -// Output can be type-asserted to []interface{} or map[string]interface{} -// respectively. Otherwise the Output evaluates as a string, and concatenates -// the evaluation of each expression. 
-type Output struct { - Exprs []Node - Posx Pos -} - -func (n *Output) Accept(v Visitor) Node { - for i, expr := range n.Exprs { - n.Exprs[i] = expr.Accept(v) - } - - return v(n) -} - -func (n *Output) Pos() Pos { - return n.Posx -} - -func (n *Output) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *Output) String() string { - var b bytes.Buffer - for _, expr := range n.Exprs { - b.WriteString(fmt.Sprintf("%s", expr)) - } - - return b.String() -} - -func (n *Output) Type(s Scope) (Type, error) { - // Special case no expressions for backward compatibility - if len(n.Exprs) == 0 { - return TypeString, nil - } - - // Special case a single expression of types list or map - if len(n.Exprs) == 1 { - exprType, err := n.Exprs[0].Type(s) - if err != nil { - return TypeInvalid, err - } - switch exprType { - case TypeList: - return TypeList, nil - case TypeMap: - return TypeMap, nil - } - } - - // Otherwise ensure all our expressions are strings - for index, expr := range n.Exprs { - exprType, err := expr.Type(s) - if err != nil { - return TypeInvalid, err - } - // We only look for things we know we can't coerce with an implicit conversion func - if exprType == TypeList || exprType == TypeMap { - return TypeInvalid, fmt.Errorf( - "multi-expression HIL outputs may only have string inputs: %d is type %s", - index, exprType) - } - } - - return TypeString, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go deleted file mode 100644 index 7a975d99..00000000 --- a/vendor/github.com/hashicorp/hil/ast/scope.go +++ /dev/null @@ -1,90 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" -) - -// Scope is the interface used to look up variables and functions while -// evaluating. How these functions/variables are defined are up to the caller. -type Scope interface { - LookupFunc(string) (Function, bool) - LookupVar(string) (Variable, bool) -} - -// Variable is a variable value for execution given as input to the engine. -// It records the value of a variables along with their type. -type Variable struct { - Value interface{} - Type Type -} - -// NewVariable creates a new Variable for the given value. This will -// attempt to infer the correct type. If it can't, an error will be returned. -func NewVariable(v interface{}) (result Variable, err error) { - switch v := reflect.ValueOf(v); v.Kind() { - case reflect.String: - result.Type = TypeString - default: - err = fmt.Errorf("Unknown type: %s", v.Kind()) - } - - result.Value = v - return -} - -// String implements Stringer on Variable, displaying the type and value -// of the Variable. -func (v Variable) String() string { - return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value) -} - -// Function defines a function that can be executed by the engine. -// The type checker will validate that the proper types will be called -// to the callback. -type Function struct { - // ArgTypes is the list of types in argument order. These are the - // required arguments. - // - // ReturnType is the type of the returned value. The Callback MUST - // return this type. - ArgTypes []Type - ReturnType Type - - // Variadic, if true, says that this function is variadic, meaning - // it takes a variable number of arguments. In this case, the - // VariadicType must be set. - Variadic bool - VariadicType Type - - // Callback is the function called for a function. The argument - // types are guaranteed to match the spec above by the type checker. 
- // The length of the args is strictly == len(ArgTypes) unless Varidiac - // is true, in which case its >= len(ArgTypes). - Callback func([]interface{}) (interface{}, error) -} - -// BasicScope is a simple scope that looks up variables and functions -// using a map. -type BasicScope struct { - FuncMap map[string]Function - VarMap map[string]Variable -} - -func (s *BasicScope) LookupFunc(n string) (Function, bool) { - if s == nil { - return Function{}, false - } - - v, ok := s.FuncMap[n] - return v, ok -} - -func (s *BasicScope) LookupVar(n string) (Variable, bool) { - if s == nil { - return Variable{}, false - } - - v, ok := s.VarMap[n] - return v, ok -} diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go deleted file mode 100644 index bd2bc157..00000000 --- a/vendor/github.com/hashicorp/hil/ast/stack.go +++ /dev/null @@ -1,25 +0,0 @@ -package ast - -// Stack is a stack of Node. -type Stack struct { - stack []Node -} - -func (s *Stack) Len() int { - return len(s.stack) -} - -func (s *Stack) Push(n Node) { - s.stack = append(s.stack, n) -} - -func (s *Stack) Pop() Node { - x := s.stack[len(s.stack)-1] - s.stack[len(s.stack)-1] = nil - s.stack = s.stack[:len(s.stack)-1] - return x -} - -func (s *Stack) Reset() { - s.stack = nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go deleted file mode 100644 index 1f51a98d..00000000 --- a/vendor/github.com/hashicorp/hil/ast/type_string.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT - -package ast - -import "fmt" - -const ( - _Type_name_0 = "TypeInvalid" - _Type_name_1 = "TypeAny" - _Type_name_2 = "TypeBool" - _Type_name_3 = "TypeString" - _Type_name_4 = "TypeInt" - _Type_name_5 = "TypeFloat" - _Type_name_6 = "TypeList" - _Type_name_7 = "TypeMap" - _Type_name_8 = "TypeUnknown" -) - -var ( - _Type_index_0 = [...]uint8{0, 11} - _Type_index_1 = [...]uint8{0, 7} - _Type_index_2 = [...]uint8{0, 8} - _Type_index_3 = [...]uint8{0, 10} - _Type_index_4 = [...]uint8{0, 7} - _Type_index_5 = [...]uint8{0, 9} - _Type_index_6 = [...]uint8{0, 8} - _Type_index_7 = [...]uint8{0, 7} - _Type_index_8 = [...]uint8{0, 11} -) - -func (i Type) String() string { - switch { - case i == 0: - return _Type_name_0 - case i == 2: - return _Type_name_1 - case i == 4: - return _Type_name_2 - case i == 8: - return _Type_name_3 - case i == 16: - return _Type_name_4 - case i == 32: - return _Type_name_5 - case i == 64: - return _Type_name_6 - case i == 128: - return _Type_name_7 - case i == 256: - return _Type_name_8 - default: - return fmt.Sprintf("Type(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go deleted file mode 100644 index d6ddaecc..00000000 --- a/vendor/github.com/hashicorp/hil/ast/unknown.go +++ /dev/null @@ -1,30 +0,0 @@ -package ast - -// IsUnknown reports whether a variable is unknown or contains any value -// that is unknown. This will recurse into lists and maps and so on. 
-func IsUnknown(v Variable) bool { - // If it is unknown itself, return true - if v.Type == TypeUnknown { - return true - } - - // If it is a container type, check the values - switch v.Type { - case TypeList: - for _, el := range v.Value.([]Variable) { - if IsUnknown(el) { - return true - } - } - case TypeMap: - for _, el := range v.Value.(map[string]Variable) { - if IsUnknown(el) { - return true - } - } - default: - } - - // Not a container type or survive the above checks - return false -} diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go deleted file mode 100644 index 4c1362d7..00000000 --- a/vendor/github.com/hashicorp/hil/ast/variable_access.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -// VariableAccess represents a variable access. -type VariableAccess struct { - Name string - Posx Pos -} - -func (n *VariableAccess) Accept(v Visitor) Node { - return v(n) -} - -func (n *VariableAccess) Pos() Pos { - return n.Posx -} - -func (n *VariableAccess) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *VariableAccess) String() string { - return fmt.Sprintf("Variable(%s)", n.Name) -} - -func (n *VariableAccess) Type(s Scope) (Type, error) { - v, ok := s.LookupVar(n.Name) - if !ok { - return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name) - } - - return v.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go deleted file mode 100644 index 06bd18de..00000000 --- a/vendor/github.com/hashicorp/hil/ast/variables_helper.go +++ /dev/null @@ -1,63 +0,0 @@ -package ast - -import "fmt" - -func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) { - if len(list) == 0 { - return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName) - } - - elemType := TypeUnknown - for _, v := range list { - if v.Type == TypeUnknown { - continue - } - - if elemType == TypeUnknown { - elemType = v.Type - continue - } - - if v.Type != elemType { - return TypeInvalid, fmt.Errorf( - "list %q does not have homogenous types. found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} - -func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { - if len(vmap) == 0 { - return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) - } - - elemType := TypeUnknown - for _, v := range vmap { - if v.Type == TypeUnknown { - continue - } - - if elemType == TypeUnknown { - elemType = v.Type - continue - } - - if v.Type != elemType { - return TypeInvalid, fmt.Errorf( - "map %q does not have homogenous types. 
found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go deleted file mode 100644 index 909c788a..00000000 --- a/vendor/github.com/hashicorp/hil/builtins.go +++ /dev/null @@ -1,331 +0,0 @@ -package hil - -import ( - "errors" - "strconv" - - "github.com/hashicorp/hil/ast" -) - -// NOTE: All builtins are tested in engine_test.go - -func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { - if scope == nil { - scope = new(ast.BasicScope) - } - if scope.FuncMap == nil { - scope.FuncMap = make(map[string]ast.Function) - } - - // Implicit conversions - scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() - scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() - scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() - scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() - scope.FuncMap["__builtin_IntToString"] = builtinIntToString() - scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() - scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() - scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() - - // Math operations - scope.FuncMap["__builtin_IntMath"] = builtinIntMath() - scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() - scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() - scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() - scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() - scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() - scope.FuncMap["__builtin_Logical"] = builtinLogical() - return scope -} - -func builtinFloatMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeFloat, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(float64) - for _, raw := range args[2:] { - arg := raw.(float64) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - result /= arg - } - } - - return result, nil - }, - } -} - -func builtinIntMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeInt, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(int) - for _, raw := range args[2:] { - arg := raw.(int) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result /= arg - case ast.ArithmeticOpMod: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result = result % arg - } - } - - return result, nil - }, - } -} - -func builtinBoolCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(bool) - rhs := args[2].(bool) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, 
errors.New("invalid comparison operation") - } - }, - } -} - -func builtinFloatCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(float64) - rhs := args[2].(float64) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinIntCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(int) - rhs := args[2].(int) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinStringCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(string) - rhs := args[2].(string) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinLogical() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeBool, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(bool) - for _, raw := range args[2:] { - arg := raw.(bool) - switch op { - case ast.ArithmeticOpLogicalOr: - result = result || arg - case ast.ArithmeticOpLogicalAnd: - result = result && arg - default: - return nil, errors.New("invalid logical operator") - } - } - - return result, nil - }, - } -} - -func builtinFloatToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - return int(args[0].(float64)), nil - }, - } -} - -func builtinFloatToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatFloat( - args[0].(float64), 'g', -1, 64), nil - }, - } -} - -func builtinIntToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - return float64(args[0].(int)), nil - }, - } -} - -func 
builtinIntToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatInt(int64(args[0].(int)), 10), nil - }, - } -} - -func builtinStringToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseInt(args[0].(string), 0, 0) - if err != nil { - return nil, err - } - - return int(v), nil - }, - } -} - -func builtinStringToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseFloat(args[0].(string), 64) - if err != nil { - return nil, err - } - - return v, nil - }, - } -} - -func builtinBoolToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeBool}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatBool(args[0].(bool)), nil - }, - } -} - -func builtinStringToBool() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseBool(args[0].(string)) - if err != nil { - return nil, err - } - - return v, nil - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go deleted file mode 100644 index 474f5058..00000000 --- a/vendor/github.com/hashicorp/hil/check_identifier.go +++ /dev/null @@ -1,88 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// IdentifierCheck is a SemanticCheck that checks that all identifiers -// resolve properly and that the right number of arguments are passed -// to functions. 
-type IdentifierCheck struct { - Scope ast.Scope - - err error - lock sync.Mutex -} - -func (c *IdentifierCheck) Visit(root ast.Node) error { - c.lock.Lock() - defer c.lock.Unlock() - defer c.reset() - root.Accept(c.visit) - return c.err -} - -func (c *IdentifierCheck) visit(raw ast.Node) ast.Node { - if c.err != nil { - return raw - } - - switch n := raw.(type) { - case *ast.Call: - c.visitCall(n) - case *ast.VariableAccess: - c.visitVariableAccess(n) - case *ast.Output: - // Ignore - case *ast.LiteralNode: - // Ignore - default: - // Ignore - } - - // We never do replacement with this visitor - return raw -} - -func (c *IdentifierCheck) visitCall(n *ast.Call) { - // Look up the function in the map - function, ok := c.Scope.LookupFunc(n.Func) - if !ok { - c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func)) - return - } - - // Break up the args into what is variadic and what is required - args := n.Args - if function.Variadic && len(args) > len(function.ArgTypes) { - args = n.Args[:len(function.ArgTypes)] - } - - // Verify the number of arguments - if len(args) != len(function.ArgTypes) { - c.createErr(n, fmt.Sprintf( - "%s: expected %d arguments, got %d", - n.Func, len(function.ArgTypes), len(n.Args))) - return - } -} - -func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) { - // Look up the variable in the map - if _, ok := c.Scope.LookupVar(n.Name); !ok { - c.createErr(n, fmt.Sprintf( - "unknown variable accessed: %s", n.Name)) - return - } -} - -func (c *IdentifierCheck) createErr(n ast.Node, str string) { - c.err = fmt.Errorf("%s: %s", n.Pos(), str) -} - -func (c *IdentifierCheck) reset() { - c.err = nil -} diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go deleted file mode 100644 index f16da391..00000000 --- a/vendor/github.com/hashicorp/hil/check_types.go +++ /dev/null @@ -1,668 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// TypeCheck implements ast.Visitor for type checking an AST tree. -// It requires some configuration to look up the type of nodes. -// -// It also optionally will not type error and will insert an implicit -// type conversions for specific types if specified by the Implicit -// field. Note that this is kind of organizationally weird to put into -// this structure but we'd rather do that than duplicate the type checking -// logic multiple times. -type TypeCheck struct { - Scope ast.Scope - - // Implicit is a map of implicit type conversions that we can do, - // and that shouldn't error. The key of the first map is the from type, - // the key of the second map is the to type, and the final string - // value is the function to call (which must be registered in the Scope). - Implicit map[ast.Type]map[ast.Type]string - - // Stack of types. This shouldn't be used directly except by implementations - // of TypeCheckNode. - Stack []ast.Type - - err error - lock sync.Mutex -} - -// TypeCheckNode is the interface that must be implemented by any -// ast.Node that wants to support type-checking. If the type checker -// encounters a node that doesn't implement this, it will error. -type TypeCheckNode interface { - TypeCheck(*TypeCheck) (ast.Node, error) -} - -func (v *TypeCheck) Visit(root ast.Node) error { - v.lock.Lock() - defer v.lock.Unlock() - defer v.reset() - root.Accept(v.visit) - - // If the resulting type is unknown, then just let the whole thing go. 
- if v.err == errExitUnknown { - v.err = nil - } - - return v.err -} - -func (v *TypeCheck) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - var result ast.Node - var err error - switch n := raw.(type) { - case *ast.Arithmetic: - tc := &typeCheckArithmetic{n} - result, err = tc.TypeCheck(v) - case *ast.Call: - tc := &typeCheckCall{n} - result, err = tc.TypeCheck(v) - case *ast.Conditional: - tc := &typeCheckConditional{n} - result, err = tc.TypeCheck(v) - case *ast.Index: - tc := &typeCheckIndex{n} - result, err = tc.TypeCheck(v) - case *ast.Output: - tc := &typeCheckOutput{n} - result, err = tc.TypeCheck(v) - case *ast.LiteralNode: - tc := &typeCheckLiteral{n} - result, err = tc.TypeCheck(v) - case *ast.VariableAccess: - tc := &typeCheckVariableAccess{n} - result, err = tc.TypeCheck(v) - default: - tc, ok := raw.(TypeCheckNode) - if !ok { - err = fmt.Errorf("unknown node for type check: %#v", raw) - break - } - - result, err = tc.TypeCheck(v) - } - - if err != nil { - pos := raw.Pos() - v.err = fmt.Errorf("At column %d, line %d: %s", - pos.Column, pos.Line, err) - } - - return result -} - -type typeCheckArithmetic struct { - n *ast.Arithmetic -} - -func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { - // The arguments are on the stack in reverse order, so pop them off. - exprs := make([]ast.Type, len(tc.n.Exprs)) - for i, _ := range tc.n.Exprs { - exprs[len(tc.n.Exprs)-1-i] = v.StackPop() - } - - // If any operand is unknown then our result is automatically unknown - for _, ty := range exprs { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - switch tc.n.Op { - case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: - return tc.checkLogical(v, exprs) - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual, - ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan, - ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual: - return tc.checkComparison(v, exprs) - default: - return tc.checkNumeric(v, exprs) - } - -} - -func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - // Determine the resulting type we want. We do this by going over - // every expression until we find one with a type we recognize. - // We do this because the first expr might be a string ("var.foo") - // and we need to know what to implicit to. - mathFunc := "__builtin_IntMath" - mathType := ast.TypeInt - for _, v := range exprs { - // We assume int math but if we find ANY float, the entire - // expression turns into floating point math. - if v == ast.TypeFloat { - mathFunc = "__builtin_FloatMath" - mathType = v - break - } - } - - // Verify the args - for i, arg := range exprs { - if arg != mathType { - cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, mathType, arg) - } - } - - // Modulo doesn't work for floats - if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod { - return nil, fmt.Errorf("modulo cannot be used with floats") - } - - // Return type - v.StackPush(mathType) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: mathFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - if len(exprs) != 2 { - // This should never happen, because the parser never produces - // nodes that violate this. - return nil, fmt.Errorf( - "comparison operators must have exactly two operands", - ) - } - - // The first operand always dictates the type for a comparison. - compareFunc := "" - compareType := exprs[0] - switch compareType { - case ast.TypeBool: - compareFunc = "__builtin_BoolCompare" - case ast.TypeFloat: - compareFunc = "__builtin_FloatCompare" - case ast.TypeInt: - compareFunc = "__builtin_IntCompare" - case ast.TypeString: - compareFunc = "__builtin_StringCompare" - default: - return nil, fmt.Errorf( - "comparison operators apply only to bool, float, int, and string", - ) - } - - // For non-equality comparisons, we will do implicit conversions to - // integer types if possible. In this case, we need to go through and - // determine the type of comparison we're doing to enable the implicit - // conversion. - if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { - compareFunc = "__builtin_IntCompare" - compareType = ast.TypeInt - for _, expr := range exprs { - if expr == ast.TypeFloat { - compareFunc = "__builtin_FloatCompare" - compareType = ast.TypeFloat - break - } - } - } - - // Verify (and possibly, convert) the args - for i, arg := range exprs { - if arg != compareType { - cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, compareType, arg, - ) - } - } - - // Only ints and floats can have the <, >, <= and >= operators applied - switch tc.n.Op { - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: - // anything goes - default: - switch compareType { - case ast.TypeFloat, ast.TypeInt: - // fine - default: - return nil, fmt.Errorf( - "<, >, <= and >= may apply only to int and float values", - ) - } - } - - // Comparison operators always return bool - v.StackPush(ast.TypeBool) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: compareFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - for i, t := range exprs { - if t != ast.TypeBool { - cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) - if cn == nil { - return nil, fmt.Errorf( - "logical operators require boolean operands, not %s", - t, - ) - } - tc.n.Exprs[i] = cn - } - } - - // Return type is always boolean - v.StackPush(ast.TypeBool) - - // Arithmetic nodes are replaced with a call to a built-in function - args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: "__builtin_Logical", - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -type typeCheckCall struct { - n *ast.Call -} - -func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the function in the map - function, ok := v.Scope.LookupFunc(tc.n.Func) - if !ok { - return nil, fmt.Errorf("unknown function called: %s", tc.n.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. - args := make([]ast.Type, len(tc.n.Args)) - for i, _ := range tc.n.Args { - args[len(tc.n.Args)-1-i] = v.StackPop() - } - - // Verify the args - for i, expected := range function.ArgTypes { - if expected == ast.TypeAny { - continue - } - - if args[i] == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if args[i] != expected { - cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) - if cn != nil { - tc.n.Args[i] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, i+1, expected.Printable(), args[i].Printable()) - } - } - - // If we're variadic, then verify the types there - if function.Variadic && function.VariadicType != ast.TypeAny { - args = args[len(function.ArgTypes):] - for i, t := range args { - if t == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if t != function.VariadicType { - realI := i + len(function.ArgTypes) - cn := v.ImplicitConversion( - t, function.VariadicType, tc.n.Args[realI]) - if cn != nil { - tc.n.Args[realI] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, realI, - function.VariadicType.Printable(), t.Printable()) - } - } - } - - // Return type - v.StackPush(function.ReturnType) - - return tc.n, nil -} - -type typeCheckConditional struct { - n *ast.Conditional -} - -func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { - // On the stack we have the types of the condition, true and false - // expressions, but they are in reverse order. 
- falseType := v.StackPop() - trueType := v.StackPop() - condType := v.StackPop() - - if condType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if condType != ast.TypeBool { - cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) - if cn == nil { - return nil, fmt.Errorf( - "condition must be type bool, not %s", condType.Printable(), - ) - } - tc.n.CondExpr = cn - } - - // The types of the true and false expression must match - if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { - - // Since passing around stringified versions of other types is - // common, we pragmatically allow the false expression to dictate - // the result type when the true expression is a string. - if trueType == ast.TypeString { - cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.TrueExpr = cn - trueType = falseType - } else { - cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.FalseExpr = cn - falseType = trueType - } - } - - // Currently list and map types cannot be used, because we cannot - // generally assert that their element types are consistent. - // Such support might be added later, either by improving the type - // system or restricting usage to only variable and literal expressions, - // but for now this is simply prohibited because it doesn't seem to - // be a common enough case to be worth the complexity. - switch trueType { - case ast.TypeList: - return nil, fmt.Errorf( - "conditional operator cannot be used with list values", - ) - case ast.TypeMap: - return nil, fmt.Errorf( - "conditional operator cannot be used with map values", - ) - } - - // Result type (guaranteed to also match falseType due to the above) - if trueType == ast.TypeUnknown { - // falseType may also be unknown, but that's okay because two - // unknowns means our result is unknown anyway. 
- v.StackPush(falseType) - } else { - v.StackPush(trueType) - } - - return tc.n, nil -} - -type typeCheckOutput struct { - n *ast.Output -} - -func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { - n := tc.n - types := make([]ast.Type, len(n.Exprs)) - for i, _ := range n.Exprs { - types[len(n.Exprs)-1-i] = v.StackPop() - } - - for _, ty := range types { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - // If there is only one argument and it is a list, we evaluate to a list - if len(types) == 1 { - switch t := types[0]; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - v.StackPush(t) - return n, nil - } - } - - // Otherwise, all concat args must be strings, so validate that - resultType := ast.TypeString - for i, t := range types { - - if t == ast.TypeUnknown { - resultType = ast.TypeUnknown - continue - } - - if t != ast.TypeString { - cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) - if cn != nil { - n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t) - } - } - - // This always results in type string, unless there are unknowns - v.StackPush(resultType) - - return n, nil -} - -type typeCheckLiteral struct { - n *ast.LiteralNode -} - -func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) { - v.StackPush(tc.n.Typex) - return tc.n, nil -} - -type typeCheckVariableAccess struct { - n *ast.VariableAccess -} - -func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the variable in the map - variable, ok := v.Scope.LookupVar(tc.n.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", tc.n.Name) - } - - // Add the type to the stack - v.StackPush(variable.Type) - - return tc.n, nil -} - -type typeCheckIndex struct { - n *ast.Index -} - -func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { - keyType := v.StackPop() - targetType := v.StackPop() - - if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - // Ensure we have a VariableAccess as the target - varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) - if !ok { - return nil, fmt.Errorf( - "target of an index must be a VariableAccess node, was %T", tc.n.Target) - } - - // Get the variable - variable, ok := v.Scope.LookupVar(varAccessNode.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", varAccessNode.Name) - } - - switch targetType { - case ast.TypeList: - if keyType != ast.TypeInt { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be an int, was %s", keyType) - } - } - - valType, err := ast.VariableListElementTypesAreHomogenous( - varAccessNode.Name, variable.Value.([]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - case ast.TypeMap: - if keyType != ast.TypeString { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be a string, was %s", keyType) - } - } - - valType, err := ast.VariableMapValueTypesAreHomogenous( - varAccessNode.Name, variable.Value.(map[string]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - default: - return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", 
variable.Type) - } -} - -func (v *TypeCheck) ImplicitConversion( - actual ast.Type, expected ast.Type, n ast.Node) ast.Node { - if v.Implicit == nil { - return nil - } - - fromMap, ok := v.Implicit[actual] - if !ok { - return nil - } - - toFunc, ok := fromMap[expected] - if !ok { - return nil - } - - return &ast.Call{ - Func: toFunc, - Args: []ast.Node{n}, - Posx: n.Pos(), - } -} - -func (v *TypeCheck) reset() { - v.Stack = nil - v.err = nil -} - -func (v *TypeCheck) StackPush(t ast.Type) { - v.Stack = append(v.Stack, t) -} - -func (v *TypeCheck) StackPop() ast.Type { - var x ast.Type - x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] - return x -} - -func (v *TypeCheck) StackPeek() ast.Type { - if len(v.Stack) == 0 { - return ast.TypeInvalid - } - - return v.Stack[len(v.Stack)-1] -} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go deleted file mode 100644 index 184e029b..00000000 --- a/vendor/github.com/hashicorp/hil/convert.go +++ /dev/null @@ -1,174 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/mapstructure" -) - -// UnknownValue is a sentinel value that can be used to denote -// that a value of a variable (or map element, list element, etc.) -// is unknown. This will always have the type ast.TypeUnknown. -const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -var hilMapstructureDecodeHookSlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookMap map[string]interface{} - -// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode -// but has a DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists. 
-func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { - sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice) - stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) - mapType := reflect.TypeOf(hilMapstructureDecodeHookMap) - - if (source == sliceType || source == stringSliceType) && target == mapType { - return nil, fmt.Errorf("Cannot convert %s into a %s", source, target) - } - - return val, nil - }, - WeaklyTypedInput: true, - Result: rawVal, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -func InterfaceToVariable(input interface{}) (ast.Variable, error) { - if iv, ok := input.(ast.Variable); ok { - return iv, nil - } - - // This is just to maintain backward compatibility - // after https://github.com/mitchellh/mapstructure/pull/98 - if v, ok := input.([]ast.Variable); ok { - return ast.Variable{ - Type: ast.TypeList, - Value: v, - }, nil - } - if v, ok := input.(map[string]ast.Variable); ok { - return ast.Variable{ - Type: ast.TypeMap, - Value: v, - }, nil - } - - var stringVal string - if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { - // Special case the unknown value to turn into "unknown" - if stringVal == UnknownValue { - return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil - } - - // Otherwise return the string value - return ast.Variable{ - Type: ast.TypeString, - Value: stringVal, - }, nil - } - - var mapVal map[string]interface{} - if err := hilMapstructureWeakDecode(input, &mapVal); err == nil { - elements := make(map[string]ast.Variable) - for i, element := range mapVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeMap, - Value: elements, - }, nil - } - - var sliceVal []interface{} - if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil { - elements := make([]ast.Variable, len(sliceVal)) - for i, element := range sliceVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeList, - Value: elements, - }, nil - } - - return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) -} - -func VariableToInterface(input ast.Variable) (interface{}, error) { - if input.Type == ast.TypeString { - if inputStr, ok := input.Value.(string); ok { - return inputStr, nil - } else { - return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") - } - } - - if input.Type == ast.TypeList { - inputList, ok := input.Value.([]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") - } - - result := make([]interface{}, 0) - if len(inputList) == 0 { - return result, nil - } - - for _, element := range inputList { - if convertedElement, err := VariableToInterface(element); err == nil { - result = append(result, convertedElement) - } else { - return nil, err - } - } - - return result, nil - } - - if input.Type == ast.TypeMap { - inputMap, ok := input.Value.(map[string]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") 
- } - - result := make(map[string]interface{}, 0) - if len(inputMap) == 0 { - return result, nil - } - - for key, value := range inputMap { - if convertedValue, err := VariableToInterface(value); err == nil { - result[key] = convertedValue - } else { - return nil, err - } - } - - return result, nil - } - - return nil, fmt.Errorf("unknown input type: %s", input.Type) -} diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go deleted file mode 100644 index 27820769..00000000 --- a/vendor/github.com/hashicorp/hil/eval.go +++ /dev/null @@ -1,472 +0,0 @@ -package hil - -import ( - "bytes" - "errors" - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// EvalConfig is the configuration for evaluating. -type EvalConfig struct { - // GlobalScope is the global scope of execution for evaluation. - GlobalScope *ast.BasicScope - - // SemanticChecks is a list of additional semantic checks that will be run - // on the tree prior to evaluating it. The type checker, identifier checker, - // etc. will be run before these automatically. - SemanticChecks []SemanticChecker -} - -// SemanticChecker is the type that must be implemented to do a -// semantic check on an AST tree. This will be called with the root node. -type SemanticChecker func(ast.Node) error - -// EvaluationResult is a struct returned from the hil.Eval function, -// representing the result of an interpolation. Results are returned in their -// "natural" Go structure rather than in terms of the HIL AST. For the types -// currently implemented, this means that the Value field can be interpreted as -// the following Go types: -// TypeInvalid: undefined -// TypeString: string -// TypeList: []interface{} -// TypeMap: map[string]interface{} -// TypBool: bool -type EvaluationResult struct { - Type EvalType - Value interface{} -} - -// InvalidResult is a structure representing the result of a HIL interpolation -// which has invalid syntax, missing variables, or some other type of error. -// The error is described out of band in the accompanying error return value. -var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil} - -// errExitUnknown is an internal error that when returned means the result -// is an unknown value. We use this for early exit. -var errExitUnknown = errors.New("unknown value") - -func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { - output, outputType, err := internalEval(root, config) - if err != nil { - return InvalidResult, err - } - - // If the result contains any nested unknowns then the result as a whole - // is unknown, so that callers only have to deal with "entirely known" - // or "entirely unknown" as outcomes. 
- if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) { - outputType = ast.TypeUnknown - output = UnknownValue - } - - switch outputType { - case ast.TypeList: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeList, - Value: output, - }) - return EvaluationResult{ - Type: TypeList, - Value: val, - }, err - case ast.TypeMap: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeMap, - Value: output, - }) - return EvaluationResult{ - Type: TypeMap, - Value: val, - }, err - case ast.TypeString: - return EvaluationResult{ - Type: TypeString, - Value: output, - }, nil - case ast.TypeBool: - return EvaluationResult{ - Type: TypeBool, - Value: output, - }, nil - case ast.TypeUnknown: - return EvaluationResult{ - Type: TypeUnknown, - Value: UnknownValue, - }, nil - default: - return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType) - } -} - -// Eval evaluates the given AST tree and returns its output value, the type -// of the output, and any error that occurred. -func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) { - // Copy the scope so we can add our builtins - if config == nil { - config = new(EvalConfig) - } - scope := registerBuiltins(config.GlobalScope) - implicitMap := map[ast.Type]map[ast.Type]string{ - ast.TypeFloat: { - ast.TypeInt: "__builtin_FloatToInt", - ast.TypeString: "__builtin_FloatToString", - }, - ast.TypeInt: { - ast.TypeFloat: "__builtin_IntToFloat", - ast.TypeString: "__builtin_IntToString", - }, - ast.TypeString: { - ast.TypeInt: "__builtin_StringToInt", - ast.TypeFloat: "__builtin_StringToFloat", - ast.TypeBool: "__builtin_StringToBool", - }, - ast.TypeBool: { - ast.TypeString: "__builtin_BoolToString", - }, - } - - // Build our own semantic checks that we always run - tv := &TypeCheck{Scope: scope, Implicit: implicitMap} - ic := &IdentifierCheck{Scope: scope} - - // Build up the semantic checks for execution - checks := make( - []SemanticChecker, - len(config.SemanticChecks), - len(config.SemanticChecks)+2) - copy(checks, config.SemanticChecks) - checks = append(checks, ic.Visit) - checks = append(checks, tv.Visit) - - // Run the semantic checks - for _, check := range checks { - if err := check(root); err != nil { - return nil, ast.TypeInvalid, err - } - } - - // Execute - v := &evalVisitor{Scope: scope} - return v.Visit(root) -} - -// EvalNode is the interface that must be implemented by any ast.Node -// to support evaluation. This will be called in visitor pattern order. -// The result of each call to Eval is automatically pushed onto the -// stack as a LiteralNode. Pop elements off the stack to get child -// values. -type EvalNode interface { - Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) -} - -type evalVisitor struct { - Scope ast.Scope - Stack ast.Stack - - err error - lock sync.Mutex -} - -func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { - // Run the actual visitor pattern - root.Accept(v.visit) - - // Get our result and clear out everything else - var result *ast.LiteralNode - if v.Stack.Len() > 0 { - result = v.Stack.Pop().(*ast.LiteralNode) - } else { - result = new(ast.LiteralNode) - } - resultErr := v.err - if resultErr == errExitUnknown { - // This means the return value is unknown and we used the error - // as an early exit mechanism. Reset since the value on the stack - // should be the unknown value. 
- resultErr = nil - } - - // Clear everything else so we aren't just dangling - v.Stack.Reset() - v.err = nil - - t, err := result.Type(v.Scope) - if err != nil { - return nil, ast.TypeInvalid, err - } - - return result.Value, t, resultErr -} - -func (v *evalVisitor) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - en, err := evalNode(raw) - if err != nil { - v.err = err - return raw - } - - out, outType, err := en.Eval(v.Scope, &v.Stack) - if err != nil { - v.err = err - return raw - } - - v.Stack.Push(&ast.LiteralNode{ - Value: out, - Typex: outType, - }) - - if outType == ast.TypeUnknown { - // Halt immediately - v.err = errExitUnknown - return raw - } - - return raw -} - -// evalNode is a private function that returns an EvalNode for built-in -// types as well as any other EvalNode implementations. -func evalNode(raw ast.Node) (EvalNode, error) { - switch n := raw.(type) { - case *ast.Index: - return &evalIndex{n}, nil - case *ast.Call: - return &evalCall{n}, nil - case *ast.Conditional: - return &evalConditional{n}, nil - case *ast.Output: - return &evalOutput{n}, nil - case *ast.LiteralNode: - return &evalLiteralNode{n}, nil - case *ast.VariableAccess: - return &evalVariableAccess{n}, nil - default: - en, ok := n.(EvalNode) - if !ok { - return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw) - } - - return en, nil - } -} - -type evalCall struct{ *ast.Call } - -func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // Look up the function in the map - function, ok := s.LookupFunc(v.Func) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown function called: %s", v.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. - args := make([]interface{}, len(v.Args)) - for i, _ := range v.Args { - node := stack.Pop().(*ast.LiteralNode) - if node.IsUnknown() { - // If any arguments are unknown then the result is automatically unknown - return UnknownValue, ast.TypeUnknown, nil - } - args[len(v.Args)-1-i] = node.Value - } - - // Call the function - result, err := function.Callback(args) - if err != nil { - return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err) - } - - return result, function.ReturnType, nil -} - -type evalConditional struct{ *ast.Conditional } - -func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // On the stack we have literal nodes representing the resulting values - // of the condition, true and false expressions, but they are in reverse - // order. - falseLit := stack.Pop().(*ast.LiteralNode) - trueLit := stack.Pop().(*ast.LiteralNode) - condLit := stack.Pop().(*ast.LiteralNode) - - if condLit.IsUnknown() { - // If our conditional is unknown then our result is also unknown - return UnknownValue, ast.TypeUnknown, nil - } - - if condLit.Value.(bool) { - return trueLit.Value, trueLit.Typex, nil - } else { - return falseLit.Value, trueLit.Typex, nil - } -} - -type evalIndex struct{ *ast.Index } - -func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - key := stack.Pop().(*ast.LiteralNode) - target := stack.Pop().(*ast.LiteralNode) - - variableName := v.Index.Target.(*ast.VariableAccess).Name - - if key.IsUnknown() { - // If our key is unknown then our result is also unknown - return UnknownValue, ast.TypeUnknown, nil - } - - // For target, we'll accept collections containing unknown values but - // we still need to catch when the collection itself is unknown, shallowly. 
- if target.Typex == ast.TypeUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - switch target.Typex { - case ast.TypeList: - return v.evalListIndex(variableName, target.Value, key.Value) - case ast.TypeMap: - return v.evalMapIndex(variableName, target.Value, key.Value) - default: - return nil, ast.TypeInvalid, fmt.Errorf( - "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", - variableName, target.Typex) - } -} - -func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a list and key is an int - list, ok := target.([]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to []Variable, is: %T", target) - } - - keyInt, ok := key.(int) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to int, is: %T", key) - } - - if len(list) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("list is empty") - } - - if keyInt < 0 || len(list) < keyInt+1 { - return nil, ast.TypeInvalid, fmt.Errorf( - "index %d out of range for list %s (max %d)", - keyInt, variableName, len(list)) - } - - returnVal := list[keyInt].Value - returnType := list[keyInt].Type - return returnVal, returnType, nil -} - -func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a map and key is a string - vmap, ok := target.(map[string]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to map[string]Variable, is: %T", target) - } - - keyString, ok := key.(string) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to string, is: %T", key) - } - - if len(vmap) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("map is empty") - } - - value, ok := vmap[keyString] - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "key %q does not exist in map %s", keyString, variableName) - } - - return value.Value, value.Type, nil -} - -type evalOutput struct{ *ast.Output } - -func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // The expressions should all be on the stack in reverse - // order. So pop them off, reverse their order, and concatenate. - nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) - haveUnknown := false - for range v.Exprs { - n := stack.Pop().(*ast.LiteralNode) - nodes = append(nodes, n) - - // If we have any unknowns then the whole result is unknown - // (we must deal with this first, because the type checker can - // skip type conversions in the presence of unknowns, and thus - // any of our other nodes may be incorrectly typed.) 
- if n.IsUnknown() { - haveUnknown = true - } - } - - if haveUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - // Special case the single list and map - if len(nodes) == 1 { - switch t := nodes[0].Typex; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - fallthrough - case ast.TypeUnknown: - return nodes[0].Value, t, nil - } - } - - // Otherwise concatenate the strings - var buf bytes.Buffer - for i := len(nodes) - 1; i >= 0; i-- { - if nodes[i].Typex != ast.TypeString { - return nil, ast.TypeInvalid, fmt.Errorf( - "invalid output with %s value at index %d: %#v", - nodes[i].Typex, - i, - nodes[i].Value, - ) - } - buf.WriteString(nodes[i].Value.(string)) - } - - return buf.String(), ast.TypeString, nil -} - -type evalLiteralNode struct{ *ast.LiteralNode } - -func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) { - return v.Value, v.Typex, nil -} - -type evalVariableAccess struct{ *ast.VariableAccess } - -func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) { - // Look up the variable in the map - variable, ok := scope.LookupVar(v.Name) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown variable accessed: %s", v.Name) - } - - return variable.Value, variable.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go deleted file mode 100644 index 6946ecd2..00000000 --- a/vendor/github.com/hashicorp/hil/eval_type.go +++ /dev/null @@ -1,16 +0,0 @@ -package hil - -//go:generate stringer -type=EvalType eval_type.go - -// EvalType represents the type of the output returned from a HIL -// evaluation. -type EvalType uint32 - -const ( - TypeInvalid EvalType = 0 - TypeString EvalType = 1 << iota - TypeBool - TypeList - TypeMap - TypeUnknown -) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go deleted file mode 100644 index b107ddd4..00000000 --- a/vendor/github.com/hashicorp/hil/evaltype_string.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT - -package hil - -import "fmt" - -const ( - _EvalType_name_0 = "TypeInvalid" - _EvalType_name_1 = "TypeString" - _EvalType_name_2 = "TypeBool" - _EvalType_name_3 = "TypeList" - _EvalType_name_4 = "TypeMap" - _EvalType_name_5 = "TypeUnknown" -) - -var ( - _EvalType_index_0 = [...]uint8{0, 11} - _EvalType_index_1 = [...]uint8{0, 10} - _EvalType_index_2 = [...]uint8{0, 8} - _EvalType_index_3 = [...]uint8{0, 8} - _EvalType_index_4 = [...]uint8{0, 7} - _EvalType_index_5 = [...]uint8{0, 11} -) - -func (i EvalType) String() string { - switch { - case i == 0: - return _EvalType_name_0 - case i == 2: - return _EvalType_name_1 - case i == 4: - return _EvalType_name_2 - case i == 8: - return _EvalType_name_3 - case i == 16: - return _EvalType_name_4 - case i == 32: - return _EvalType_name_5 - default: - return fmt.Sprintf("EvalType(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/go.mod b/vendor/github.com/hashicorp/hil/go.mod deleted file mode 100644 index 45719a69..00000000 --- a/vendor/github.com/hashicorp/hil/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module github.com/hashicorp/hil - -require ( - github.com/mitchellh/mapstructure v1.1.2 - github.com/mitchellh/reflectwalk v1.0.0 -) diff --git a/vendor/github.com/hashicorp/hil/go.sum b/vendor/github.com/hashicorp/hil/go.sum deleted file mode 100644 index 83639b69..00000000 --- a/vendor/github.com/hashicorp/hil/go.sum +++ 
/dev/null @@ -1,4 +0,0 @@ -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go deleted file mode 100644 index ecbe1fdb..00000000 --- a/vendor/github.com/hashicorp/hil/parse.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/parser" - "github.com/hashicorp/hil/scanner" -) - -// Parse parses the given program and returns an executable AST tree. -// -// Syntax errors are returned with error having the dynamic type -// *parser.ParseError, which gives the caller access to the source position -// where the error was found, which allows (for example) combining it with -// a known source filename to add context to the error message. -func Parse(v string) (ast.Node, error) { - return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) -} - -// ParseWithPosition is like Parse except that it overrides the source -// row and column position of the first character in the string, which should -// be 1-based. -// -// This can be used when HIL is embedded in another language and the outer -// parser knows the row and column where the HIL expression started within -// the overall source file. -func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { - ch := scanner.Scan(v, pos) - return parser.Parse(ch) -} diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go deleted file mode 100644 index 2e013e01..00000000 --- a/vendor/github.com/hashicorp/hil/parser/binary_op.go +++ /dev/null @@ -1,45 +0,0 @@ -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -var binaryOps []map[scanner.TokenType]ast.ArithmeticOp - -func init() { - // This operation table maps from the operator's scanner token type - // to the AST arithmetic operation. All expressions produced from - // binary operators are *ast.Arithmetic nodes. - // - // Binary operator groups are listed in order of precedence, with - // the *lowest* precedence first. Operators within the same group - // have left-to-right associativity. 
- binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{ - { - scanner.OR: ast.ArithmeticOpLogicalOr, - }, - { - scanner.AND: ast.ArithmeticOpLogicalAnd, - }, - { - scanner.EQUAL: ast.ArithmeticOpEqual, - scanner.NOTEQUAL: ast.ArithmeticOpNotEqual, - }, - { - scanner.GT: ast.ArithmeticOpGreaterThan, - scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual, - scanner.LT: ast.ArithmeticOpLessThan, - scanner.LTE: ast.ArithmeticOpLessThanOrEqual, - }, - { - scanner.PLUS: ast.ArithmeticOpAdd, - scanner.MINUS: ast.ArithmeticOpSub, - }, - { - scanner.STAR: ast.ArithmeticOpMul, - scanner.SLASH: ast.ArithmeticOpDiv, - scanner.PERCENT: ast.ArithmeticOpMod, - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go deleted file mode 100644 index bacd6964..00000000 --- a/vendor/github.com/hashicorp/hil/parser/error.go +++ /dev/null @@ -1,38 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -type ParseError struct { - Message string - Pos ast.Pos -} - -func Errorf(pos ast.Pos, format string, args ...interface{}) error { - return &ParseError{ - Message: fmt.Sprintf(format, args...), - Pos: pos, - } -} - -// TokenErrorf is a convenient wrapper around Errorf that uses the -// position of the given token. -func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error { - return Errorf(token.Pos, format, args...) -} - -func ExpectationError(wanted string, got *scanner.Token) error { - return TokenErrorf(got, "expected %s but found %s", wanted, got) -} - -func (e *ParseError) Error() string { - return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message) -} - -func (e *ParseError) String() string { - return e.Error() -} diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go deleted file mode 100644 index de954f38..00000000 --- a/vendor/github.com/hashicorp/hil/parser/fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build gofuzz - -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -// This is a fuzz testing function designed to be used with go-fuzz: -// https://github.com/dvyukov/go-fuzz -// -// It's not included in a normal build due to the gofuzz build tag above. -// -// There are some input files that you can use as a seed corpus for go-fuzz -// in the directory ./fuzz-corpus . 
- -func Fuzz(data []byte) int { - str := string(data) - - ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1}) - _, err := Parse(ch) - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go deleted file mode 100644 index 376f1c49..00000000 --- a/vendor/github.com/hashicorp/hil/parser/parser.go +++ /dev/null @@ -1,522 +0,0 @@ -package parser - -import ( - "strconv" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -func Parse(ch <-chan *scanner.Token) (ast.Node, error) { - peeker := scanner.NewPeeker(ch) - parser := &parser{peeker} - output, err := parser.ParseTopLevel() - peeker.Close() - return output, err -} - -type parser struct { - peeker *scanner.Peeker -} - -func (p *parser) ParseTopLevel() (ast.Node, error) { - return p.parseInterpolationSeq(false) -} - -func (p *parser) ParseQuoted() (ast.Node, error) { - return p.parseInterpolationSeq(true) -} - -// parseInterpolationSeq parses either the top-level sequence of literals -// and interpolation expressions or a similar sequence within a quoted -// string inside an interpolation expression. The latter case is requested -// by setting 'quoted' to true. -func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) { - literalType := scanner.LITERAL - endType := scanner.EOF - if quoted { - // exceptions for quoted sequences - literalType = scanner.STRING - endType = scanner.CQUOTE - } - - startPos := p.peeker.Peek().Pos - - if quoted { - tok := p.peeker.Read() - if tok.Type != scanner.OQUOTE { - return nil, ExpectationError("open quote", tok) - } - } - - var exprs []ast.Node - for { - tok := p.peeker.Read() - - if tok.Type == endType { - break - } - - switch tok.Type { - case literalType: - val, err := p.parseStringToken(tok) - if err != nil { - return nil, err - } - exprs = append(exprs, &ast.LiteralNode{ - Value: val, - Typex: ast.TypeString, - Posx: tok.Pos, - }) - case scanner.BEGIN: - expr, err := p.ParseInterpolation() - if err != nil { - return nil, err - } - exprs = append(exprs, expr) - default: - return nil, ExpectationError(`"${"`, tok) - } - } - - if len(exprs) == 0 { - // If we have no parts at all then the input must've - // been an empty string. - exprs = append(exprs, &ast.LiteralNode{ - Value: "", - Typex: ast.TypeString, - Posx: startPos, - }) - } - - // As a special case, if our "Output" contains only one expression - // and it's a literal string then we'll hoist it up to be our - // direct return value, so callers can easily recognize a string - // that has no interpolations at all. - if len(exprs) == 1 { - if lit, ok := exprs[0].(*ast.LiteralNode); ok { - if lit.Typex == ast.TypeString { - return lit, nil - } - } - } - - return &ast.Output{ - Exprs: exprs, - Posx: startPos, - }, nil -} - -// parseStringToken takes a token of either LITERAL or STRING type and -// returns the interpreted string, after processing any relevant -// escape sequences. 
-func (p *parser) parseStringToken(tok *scanner.Token) (string, error) { - var backslashes bool - switch tok.Type { - case scanner.LITERAL: - backslashes = false - case scanner.STRING: - backslashes = true - default: - panic("unsupported string token type") - } - - raw := []byte(tok.Content) - buf := make([]byte, 0, len(raw)) - - for i := 0; i < len(raw); i++ { - b := raw[i] - more := len(raw) > (i + 1) - - if b == '$' { - if more && raw[i+1] == '$' { - // skip over the second dollar sign - i++ - } - } else if backslashes && b == '\\' { - if !more { - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `unfinished backslash escape sequence`, - ) - } - escapeType := raw[i+1] - switch escapeType { - case '\\': - // skip over the second slash - i++ - case 'n': - b = '\n' - i++ - case '"': - b = '"' - i++ - default: - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `invalid backslash escape sequence`, - ) - } - } - - buf = append(buf, b) - } - - return string(buf), nil -} - -func (p *parser) ParseInterpolation() (ast.Node, error) { - // By the time we're called, we're already "inside" the ${ sequence - // because the caller consumed the ${ token. - - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - - err = p.requireTokenType(scanner.END, `"}"`) - if err != nil { - return nil, err - } - - return expr, nil -} - -func (p *parser) ParseExpression() (ast.Node, error) { - return p.parseTernaryCond() -} - -func (p *parser) parseTernaryCond() (ast.Node, error) { - // The ternary condition operator (.. ? .. : ..) behaves somewhat - // like a binary operator except that the "operator" is itself - // an expression enclosed in two punctuation characters. - // The middle expression is parsed as if the ? and : symbols - // were parentheses. The "rhs" (the "false expression") is then - // treated right-associatively so it behaves similarly to the - // middle in terms of precedence. - - startPos := p.peeker.Peek().Pos - - var cond, trueExpr, falseExpr ast.Node - var err error - - cond, err = p.parseBinaryOps(binaryOps) - if err != nil { - return nil, err - } - - next := p.peeker.Peek() - if next.Type != scanner.QUESTION { - return cond, nil - } - - p.peeker.Read() // eat question mark - - trueExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - colon := p.peeker.Read() - if colon.Type != scanner.COLON { - return nil, ExpectationError(":", colon) - } - - falseExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - return &ast.Conditional{ - CondExpr: cond, - TrueExpr: trueExpr, - FalseExpr: falseExpr, - Posx: startPos, - }, nil -} - -// parseBinaryOps calls itself recursively to work through all of the -// operator precedence groups, and then eventually calls ParseExpressionTerm -// for each operand. -func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) { - if len(ops) == 0 { - // We've run out of operators, so now we'll just try to parse a term. - return p.ParseExpressionTerm() - } - - thisLevel := ops[0] - remaining := ops[1:] - - startPos := p.peeker.Peek().Pos - - var lhs, rhs ast.Node - operator := ast.ArithmeticOpInvalid - var err error - - // parse a term that might be the first operand of a binary - // expression or it might just be a standalone term, but - // we won't know until we've parsed it and can look ahead - // to see if there's an operator token. 
- lhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - - // We'll keep eating up arithmetic operators until we run - // out, so that operators with the same precedence will combine in a - // left-associative manner: - // a+b+c => (a+b)+c, not a+(b+c) - // - // Should we later want to have right-associative operators, a way - // to achieve that would be to call back up to ParseExpression here - // instead of iteratively parsing only the remaining operators. - for { - next := p.peeker.Peek() - var newOperator ast.ArithmeticOp - var ok bool - if newOperator, ok = thisLevel[next.Type]; !ok { - break - } - - // Are we extending an expression started on - // the previous iteration? - if operator != ast.ArithmeticOpInvalid { - lhs = &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - } - } - - operator = newOperator - p.peeker.Read() // eat operator token - rhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - } - - if operator != ast.ArithmeticOpInvalid { - return &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - }, nil - } else { - return lhs, nil - } -} - -func (p *parser) ParseExpressionTerm() (ast.Node, error) { - - next := p.peeker.Peek() - - switch next.Type { - - case scanner.OPAREN: - p.peeker.Read() - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CPAREN, `")"`) - return expr, err - - case scanner.OQUOTE: - return p.ParseQuoted() - - case scanner.INTEGER: - tok := p.peeker.Read() - val, err := strconv.Atoi(tok.Content) - if err != nil { - return nil, TokenErrorf(tok, "invalid integer: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeInt, - Posx: tok.Pos, - }, nil - - case scanner.FLOAT: - tok := p.peeker.Read() - val, err := strconv.ParseFloat(tok.Content, 64) - if err != nil { - return nil, TokenErrorf(tok, "invalid float: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeFloat, - Posx: tok.Pos, - }, nil - - case scanner.BOOL: - tok := p.peeker.Read() - // the scanner guarantees that tok.Content is either "true" or "false" - var val bool - if tok.Content[0] == 't' { - val = true - } else { - val = false - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeBool, - Posx: tok.Pos, - }, nil - - case scanner.MINUS: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5) - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents negative numbers as - // a binary subtraction of the number from zero. - return &ast.Arithmetic{ - Op: ast.ArithmeticOpSub, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: 0, - Typex: ast.TypeInt, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.BANG: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents binary negation as an equality - // test with "false". 
- return &ast.Arithmetic{ - Op: ast.ArithmeticOpEqual, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: false, - Typex: ast.TypeBool, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.IDENTIFIER: - return p.ParseScopeInteraction() - - default: - return nil, ExpectationError("expression", next) - } -} - -// ParseScopeInteraction parses the expression types that interact -// with the evaluation scope: variable access, function calls, and -// indexing. -// -// Indexing should actually be a distinct operator in its own right, -// so that e.g. it can be applied to the result of a function call, -// but for now we're preserving the behavior of the older yacc-based -// parser. -func (p *parser) ParseScopeInteraction() (ast.Node, error) { - first := p.peeker.Read() - startPos := first.Pos - if first.Type != scanner.IDENTIFIER { - return nil, ExpectationError("identifier", first) - } - - next := p.peeker.Peek() - if next.Type == scanner.OPAREN { - // function call - funcName := first.Content - p.peeker.Read() // eat paren - var args []ast.Node - - for { - if p.peeker.Peek().Type == scanner.CPAREN { - break - } - - arg, err := p.ParseExpression() - if err != nil { - return nil, err - } - - args = append(args, arg) - - if p.peeker.Peek().Type == scanner.COMMA { - p.peeker.Read() // eat comma - continue - } else { - break - } - } - - err := p.requireTokenType(scanner.CPAREN, `")"`) - if err != nil { - return nil, err - } - - return &ast.Call{ - Func: funcName, - Args: args, - Posx: startPos, - }, nil - } - - varNode := &ast.VariableAccess{ - Name: first.Content, - Posx: startPos, - } - - if p.peeker.Peek().Type == scanner.OBRACKET { - // index operator - startPos := p.peeker.Read().Pos // eat bracket - indexExpr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CBRACKET, `"]"`) - if err != nil { - return nil, err - } - return &ast.Index{ - Target: varNode, - Key: indexExpr, - Posx: startPos, - }, nil - } - - return varNode, nil -} - -// requireTokenType consumes the next token an returns an error if its -// type does not match the given type. nil is returned if the type matches. -// -// This is a helper around peeker.Read() for situations where the parser just -// wants to assert that a particular token type must be present. -func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error { - token := p.peeker.Read() - if token.Type != wantType { - return ExpectationError(wantName, token) - } - return nil -} diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go deleted file mode 100644 index 4de37283..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/peeker.go +++ /dev/null @@ -1,55 +0,0 @@ -package scanner - -// Peeker is a utility that wraps a token channel returned by Scan and -// provides an interface that allows a caller (e.g. the parser) to -// work with the token stream in a mode that allows one token of lookahead, -// and provides utilities for more convenient processing of the stream. -type Peeker struct { - ch <-chan *Token - peeked *Token -} - -func NewPeeker(ch <-chan *Token) *Peeker { - return &Peeker{ - ch: ch, - } -} - -// Peek returns the next token in the stream without consuming it. A -// subsequent call to Read will return the same token. -func (p *Peeker) Peek() *Token { - if p.peeked == nil { - p.peeked = <-p.ch - } - return p.peeked -} - -// Read consumes the next token in the stream and returns it. 
-func (p *Peeker) Read() *Token { - token := p.Peek() - - // As a special case, we will produce the EOF token forever once - // it is reached. - if token.Type != EOF { - p.peeked = nil - } - - return token -} - -// Close ensures that the token stream has been exhausted, to prevent -// the goroutine in the underlying scanner from leaking. -// -// It's not necessary to call this if the caller reads the token stream -// to EOF, since that implicitly closes the scanner. -func (p *Peeker) Close() { - for _ = range p.ch { - // discard - } - // Install a synthetic EOF token in 'peeked' in case someone - // erroneously calls Peek() or Read() after we've closed. - p.peeked = &Token{ - Type: EOF, - Content: "", - } -} diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go deleted file mode 100644 index 86085de0..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/scanner.go +++ /dev/null @@ -1,556 +0,0 @@ -package scanner - -import ( - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" -) - -// Scan returns a channel that recieves Tokens from the given input string. -// -// The scanner's job is just to partition the string into meaningful parts. -// It doesn't do any transformation of the raw input string, so the caller -// must deal with any further interpretation required, such as parsing INTEGER -// tokens into real ints, or dealing with escape sequences in LITERAL or -// STRING tokens. -// -// Strings in the returned tokens are slices from the original string. -// -// startPos should be set to ast.InitPos unless the caller knows that -// this interpolation string is part of a larger file and knows the position -// of the first character in that larger file. -func Scan(s string, startPos ast.Pos) <-chan *Token { - ch := make(chan *Token) - go scan(s, ch, startPos) - return ch -} - -func scan(s string, ch chan<- *Token, pos ast.Pos) { - // 'remain' starts off as the whole string but we gradually - // slice of the front of it as we work our way through. - remain := s - - // nesting keeps track of how many ${ .. } sequences we are - // inside, so we can recognize the minor differences in syntax - // between outer string literals (LITERAL tokens) and quoted - // string literals (STRING tokens). - nesting := 0 - - // We're going to flip back and forth between parsing literals/strings - // and parsing interpolation sequences ${ .. } until we reach EOF or - // some INVALID token. -All: - for { - startPos := pos - // Literal string processing first, since the beginning of - // a string is always outside of an interpolation sequence. - literalVal, terminator := scanLiteral(remain, pos, nesting > 0) - - if len(literalVal) > 0 { - litType := LITERAL - if nesting > 0 { - litType = STRING - } - ch <- &Token{ - Type: litType, - Content: literalVal, - Pos: startPos, - } - remain = remain[len(literalVal):] - } - - ch <- terminator - remain = remain[len(terminator.Content):] - pos = terminator.Pos - // Safe to use len() here because none of the terminator tokens - // can contain UTF-8 sequences. - pos.Column = pos.Column + len(terminator.Content) - - switch terminator.Type { - case INVALID: - // Synthetic EOF after invalid token, since further scanning - // is likely to just produce more garbage. - ch <- &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break All - case EOF: - // All done! 
- break All - case BEGIN: - nesting++ - case CQUOTE: - // nothing special to do - default: - // Should never happen - panic("invalid string/literal terminator") - } - - // Now we do the processing of the insides of ${ .. } sequences. - // This loop terminates when we encounter either a closing } or - // an opening ", which will cause us to return to literal processing. - Interpolation: - for { - - token, size, newPos := scanInterpolationToken(remain, pos) - ch <- token - remain = remain[size:] - pos = newPos - - switch token.Type { - case INVALID: - // Synthetic EOF after invalid token, since further scanning - // is likely to just produce more garbage. - ch <- &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break All - case EOF: - // All done - // (though a syntax error that we'll catch in the parser) - break All - case END: - nesting-- - if nesting < 0 { - // Can happen if there are unbalanced ${ and } sequences - // in the input, which we'll catch in the parser. - nesting = 0 - } - break Interpolation - case OQUOTE: - // Beginning of nested quoted string - break Interpolation - } - } - } - - close(ch) -} - -// Returns the token found at the start of the given string, followed by -// the number of bytes that were consumed from the string and the adjusted -// source position. -// -// Note that the number of bytes consumed can be more than the length of -// the returned token contents if the string begins with whitespace, since -// it will be silently consumed before reading the token. -func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) { - pos := startPos - size := 0 - - // Consume whitespace, if any - for len(s) > 0 && byteIsSpace(s[0]) { - if s[0] == '\n' { - pos.Column = 1 - pos.Line++ - } else { - pos.Column++ - } - size++ - s = s[1:] - } - - // Unexpected EOF during sequence - if len(s) == 0 { - return &Token{ - Type: EOF, - Content: "", - Pos: pos, - }, size, pos - } - - next := s[0] - var token *Token - - switch next { - case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':': - // Easy punctuation symbols that don't have any special meaning - // during scanning, and that stand for themselves in the - // TokenType enumeration. 
- token = &Token{ - Type: TokenType(next), - Content: s[:1], - Pos: pos, - } - case '}': - token = &Token{ - Type: END, - Content: s[:1], - Pos: pos, - } - case '"': - token = &Token{ - Type: OQUOTE, - Content: s[:1], - Pos: pos, - } - case '!': - if len(s) >= 2 && s[:2] == "!=" { - token = &Token{ - Type: NOTEQUAL, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: BANG, - Content: s[:1], - Pos: pos, - } - } - case '<': - if len(s) >= 2 && s[:2] == "<=" { - token = &Token{ - Type: LTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: LT, - Content: s[:1], - Pos: pos, - } - } - case '>': - if len(s) >= 2 && s[:2] == ">=" { - token = &Token{ - Type: GTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: GT, - Content: s[:1], - Pos: pos, - } - } - case '=': - if len(s) >= 2 && s[:2] == "==" { - token = &Token{ - Type: EQUAL, - Content: s[:2], - Pos: pos, - } - } else { - // A single equals is not a valid operator - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '&': - if len(s) >= 2 && s[:2] == "&&" { - token = &Token{ - Type: AND, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '|': - if len(s) >= 2 && s[:2] == "||" { - token = &Token{ - Type: OR, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - default: - if next >= '0' && next <= '9' { - num, numType := scanNumber(s) - token = &Token{ - Type: numType, - Content: num, - Pos: pos, - } - } else if stringStartsWithIdentifier(s) { - ident, runeLen := scanIdentifier(s) - tokenType := IDENTIFIER - if ident == "true" || ident == "false" { - tokenType = BOOL - } - token = &Token{ - Type: tokenType, - Content: ident, - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. - pos.Column = pos.Column + runeLen - return token, size + len(ident), pos - } else { - _, byteLen := utf8.DecodeRuneInString(s) - token = &Token{ - Type: INVALID, - Content: s[:byteLen], - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. - pos.Column = pos.Column + 1 - return token, size + byteLen, pos - } - } - - // Here we assume that the token content contains no UTF-8 sequences, - // because we dealt with UTF-8 characters as a special case where - // necessary above. - size = size + len(token.Content) - pos.Column = pos.Column + len(token.Content) - - return token, size, pos -} - -// Returns the (possibly-empty) prefix of the given string that represents -// a literal, followed by the token that marks the end of the literal. -func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { - litLen := 0 - pos := startPos - var terminator *Token - for { - - if litLen >= len(s) { - if nested { - // We've ended in the middle of a quoted string, - // which means this token is actually invalid. - return "", &Token{ - Type: INVALID, - Content: s, - Pos: startPos, - } - } - terminator = &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break - } - - next := s[litLen] - - if next == '$' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '{' { - terminator = &Token{ - Type: BEGIN, - Content: s[litLen : litLen+2], - Pos: pos, - } - pos.Column = pos.Column + 2 - break - } else if follow == '$' { - // Double-$ escapes the special processing of $, - // so we will consume both characters here. 
- pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - - // special handling that applies only to quoted strings - if nested { - if next == '"' { - terminator = &Token{ - Type: CQUOTE, - Content: s[litLen : litLen+1], - Pos: pos, - } - pos.Column = pos.Column + 1 - break - } - - // Escaped quote marks do not terminate the string. - // - // All we do here in the scanner is avoid terminating a string - // due to an escaped quote. The parser is responsible for the - // full handling of escape sequences, since it's able to produce - // better error messages than we can produce in here. - if next == '\\' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '"' { - // \" escapes the special processing of ", - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } else if follow == '\\' { - // \\ escapes \ - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - } - - if next == '\n' { - pos.Column = 1 - pos.Line++ - litLen++ - } else { - pos.Column++ - - // "Column" measures runes, so we need to actually consume - // a valid UTF-8 character here. - _, size := utf8.DecodeRuneInString(s[litLen:]) - litLen = litLen + size - } - - } - - return s[:litLen], terminator -} - -// scanNumber returns the extent of the prefix of the string that represents -// a valid number, along with what type of number it represents: INT or FLOAT. -// -// scanNumber does only basic character analysis: numbers consist of digits -// and periods, with at least one period signalling a FLOAT. It's the parser's -// responsibility to validate the form and range of the number, such as ensuring -// that a FLOAT actually contains only one period, etc. -func scanNumber(s string) (string, TokenType) { - period := -1 - byteLen := 0 - numType := INTEGER - for { - if byteLen >= len(s) { - break - } - - next := s[byteLen] - if next != '.' && (next < '0' || next > '9') { - // If our last value was a period, then we're not a float, - // we're just an integer that ends in a period. - if period == byteLen-1 { - byteLen-- - numType = INTEGER - } - - break - } - - if next == '.' { - // If we've already seen a period, break out - if period >= 0 { - break - } - - period = byteLen - numType = FLOAT - } - - byteLen++ - } - - return s[:byteLen], numType -} - -// scanIdentifier returns the extent of the prefix of the string that -// represents a valid identifier, along with the length of that prefix -// in runes. -// -// Identifiers may contain utf8-encoded non-Latin letters, which will -// cause the returned "rune length" to be shorter than the byte length -// of the returned string. -func scanIdentifier(s string) (string, int) { - byteLen := 0 - runeLen := 0 - for { - if byteLen >= len(s) { - break - } - - nextRune, size := utf8.DecodeRuneInString(s[byteLen:]) - if !(nextRune == '_' || - nextRune == '-' || - nextRune == '.' || - nextRune == '*' || - unicode.IsNumber(nextRune) || - unicode.IsLetter(nextRune) || - unicode.IsMark(nextRune)) { - break - } - - // If we reach a star, it must be between periods to be part - // of the same identifier. - if nextRune == '*' && s[byteLen-1] != '.' { - break - } - - // If our previous character was a star, then the current must - // be period. Otherwise, undo that and exit. - if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' { - byteLen-- - if s[byteLen-1] == '.' 
{ - byteLen-- - } - - break - } - - byteLen = byteLen + size - runeLen = runeLen + 1 - } - - return s[:byteLen], runeLen -} - -// byteIsSpace implements a restrictive interpretation of spaces that includes -// only what's valid inside interpolation sequences: spaces, tabs, newlines. -func byteIsSpace(b byte) bool { - switch b { - case ' ', '\t', '\r', '\n': - return true - default: - return false - } -} - -// stringStartsWithIdentifier returns true if the given string begins with -// a character that is a legal start of an identifier: an underscore or -// any character that Unicode considers to be a letter. -func stringStartsWithIdentifier(s string) bool { - if len(s) == 0 { - return false - } - - first := s[0] - - // Easy ASCII cases first - if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' { - return true - } - - // If our first byte begins a UTF-8 sequence then the sequence might - // be a unicode letter. - if utf8.RuneStart(first) { - firstRune, _ := utf8.DecodeRuneInString(s) - if unicode.IsLetter(firstRune) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go deleted file mode 100644 index b6c82ae9..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/token.go +++ /dev/null @@ -1,105 +0,0 @@ -package scanner - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -type Token struct { - Type TokenType - Content string - Pos ast.Pos -} - -//go:generate stringer -type=TokenType -type TokenType rune - -const ( - // Raw string data outside of ${ .. } sequences - LITERAL TokenType = 'o' - - // STRING is like a LITERAL but it's inside a quoted string - // within a ${ ... } sequence, and so it can contain backslash - // escaping. - STRING TokenType = 'S' - - // Other Literals - INTEGER TokenType = 'I' - FLOAT TokenType = 'F' - BOOL TokenType = 'B' - - BEGIN TokenType = '$' // actually "${" - END TokenType = '}' - OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence - CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence - OPAREN TokenType = '(' - CPAREN TokenType = ')' - OBRACKET TokenType = '[' - CBRACKET TokenType = ']' - COMMA TokenType = ',' - - IDENTIFIER TokenType = 'i' - - PERIOD TokenType = '.' - PLUS TokenType = '+' - MINUS TokenType = '-' - STAR TokenType = '*' - SLASH TokenType = '/' - PERCENT TokenType = '%' - - AND TokenType = '∧' - OR TokenType = '∨' - BANG TokenType = '!' - - EQUAL TokenType = '=' - NOTEQUAL TokenType = '≠' - GT TokenType = '>' - LT TokenType = '<' - GTE TokenType = '≥' - LTE TokenType = '≤' - - QUESTION TokenType = '?' - COLON TokenType = ':' - - EOF TokenType = '␄' - - // Produced for sequences that cannot be understood as valid tokens - // e.g. due to use of unrecognized punctuation. 
- INVALID TokenType = '�' -) - -func (t *Token) String() string { - switch t.Type { - case EOF: - return "end of string" - case INVALID: - return fmt.Sprintf("invalid sequence %q", t.Content) - case INTEGER: - return fmt.Sprintf("integer %s", t.Content) - case FLOAT: - return fmt.Sprintf("float %s", t.Content) - case STRING: - return fmt.Sprintf("string %q", t.Content) - case LITERAL: - return fmt.Sprintf("literal %q", t.Content) - case OQUOTE: - return fmt.Sprintf("opening quote") - case CQUOTE: - return fmt.Sprintf("closing quote") - case AND: - return "&&" - case OR: - return "||" - case NOTEQUAL: - return "!=" - case GTE: - return ">=" - case LTE: - return "<=" - default: - // The remaining token types have content that - // speaks for itself. - return fmt.Sprintf("%q", t.Content) - } -} diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go deleted file mode 100644 index a602f5fd..00000000 --- a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by "stringer -type=TokenType"; DO NOT EDIT - -package scanner - -import "fmt" - -const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID" - -var _TokenType_map = map[TokenType]string{ - 33: _TokenType_name[0:4], - 36: _TokenType_name[4:9], - 37: _TokenType_name[9:16], - 40: _TokenType_name[16:22], - 41: _TokenType_name[22:28], - 42: _TokenType_name[28:32], - 43: _TokenType_name[32:36], - 44: _TokenType_name[36:41], - 45: _TokenType_name[41:46], - 46: _TokenType_name[46:52], - 47: _TokenType_name[52:57], - 58: _TokenType_name[57:62], - 60: _TokenType_name[62:64], - 61: _TokenType_name[64:69], - 62: _TokenType_name[69:71], - 63: _TokenType_name[71:79], - 66: _TokenType_name[79:83], - 70: _TokenType_name[83:88], - 73: _TokenType_name[88:95], - 83: _TokenType_name[95:101], - 91: _TokenType_name[101:109], - 93: _TokenType_name[109:117], - 105: _TokenType_name[117:127], - 111: _TokenType_name[127:134], - 125: _TokenType_name[134:137], - 8220: _TokenType_name[137:143], - 8221: _TokenType_name[143:149], - 8743: _TokenType_name[149:152], - 8744: _TokenType_name[152:154], - 8800: _TokenType_name[154:162], - 8804: _TokenType_name[162:165], - 8805: _TokenType_name[165:168], - 9220: _TokenType_name[168:171], - 65533: _TokenType_name[171:178], -} - -func (i TokenType) String() string { - if str, ok := _TokenType_map[i]; ok { - return str - } - return fmt.Sprintf("TokenType(%d)", i) -} diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go deleted file mode 100644 index e69df294..00000000 --- a/vendor/github.com/hashicorp/hil/transform_fixed.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" -) - -// FixedValueTransform transforms an AST to return a fixed value for -// all interpolations. i.e. you can make "hi ${anything}" always -// turn into "hi foo". -// -// The primary use case for this is for config validations where you can -// verify that interpolations result in a certain type of string. 
-func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node { - // We visit the nodes in top-down order - result := root - switch n := result.(type) { - case *ast.Output: - for i, v := range n.Exprs { - n.Exprs[i] = FixedValueTransform(v, Value) - } - case *ast.LiteralNode: - // We keep it as-is - default: - // Anything else we replace - result = Value - } - - return result -} diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go deleted file mode 100644 index 0ace8306..00000000 --- a/vendor/github.com/hashicorp/hil/walk.go +++ /dev/null @@ -1,266 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/reflectwalk" -) - -// WalkFn is the type of function to pass to Walk. Modify fields within -// WalkData to control whether replacement happens. -type WalkFn func(*WalkData) error - -// WalkData is the structure passed to the callback of the Walk function. -// -// This structure contains data passed in as well as fields that are expected -// to be written by the caller as a result. Please see the documentation for -// each field for more information. -type WalkData struct { - // Root is the parsed root of this HIL program - Root ast.Node - - // Location is the location within the structure where this - // value was found. This can be used to modify behavior within - // slices and so on. - Location reflectwalk.Location - - // The below two values must be set by the callback to have any effect. - // - // Replace, if true, will replace the value in the structure with - // ReplaceValue. It is up to the caller to make sure this is a string. - Replace bool - ReplaceValue string -} - -// Walk will walk an arbitrary Go structure and parse any string as an -// HIL program and call the callback cb to determine what to replace it -// with. -// -// This function is very useful for arbitrary HIL program interpolation -// across a complex configuration structure. Due to the heavy use of -// reflection in this function, it is recommend to write many unit tests -// with your typical configuration structures to hilp mitigate the risk -// of panics. -func Walk(v interface{}, cb WalkFn) error { - walker := &interpolationWalker{F: cb} - return reflectwalk.Walk(v, walker) -} - -// interpolationWalker implements interfaces for the reflectwalk package -// (github.com/mitchellh/reflectwalk) that can be used to automatically -// execute a callback for an interpolation. 
-type interpolationWalker struct { - F WalkFn - - key []string - lastValue reflect.Value - loc reflectwalk.Location - cs []reflect.Value - csKey []reflect.Value - csData interface{} - sliceIndex int - unknownKeys []string -} - -func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { - w.loc = loc - return nil -} - -func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { - w.loc = reflectwalk.None - - switch loc { - case reflectwalk.Map: - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - w.key = w.key[:len(w.key)-1] - w.csKey = w.csKey[:len(w.csKey)-1] - case reflectwalk.Slice: - // Split any values that need to be split - w.splitSlice() - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.SliceElem: - w.csKey = w.csKey[:len(w.csKey)-1] - } - - return nil -} - -func (w *interpolationWalker) Map(m reflect.Value) error { - w.cs = append(w.cs, m) - return nil -} - -func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { - w.csData = k - w.csKey = append(w.csKey, k) - w.key = append(w.key, k.String()) - w.lastValue = v - return nil -} - -func (w *interpolationWalker) Slice(s reflect.Value) error { - w.cs = append(w.cs, s) - return nil -} - -func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { - w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = i - return nil -} - -func (w *interpolationWalker) Primitive(v reflect.Value) error { - setV := v - - // We only care about strings - if v.Kind() == reflect.Interface { - setV = v - v = v.Elem() - } - if v.Kind() != reflect.String { - return nil - } - - astRoot, err := Parse(v.String()) - if err != nil { - return err - } - - // If the AST we got is just a literal string value with the same - // value then we ignore it. We have to check if its the same value - // because it is possible to input a string, get out a string, and - // have it be different. For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.F == nil { - return nil - } - - data := WalkData{Root: astRoot, Location: w.loc} - if err := w.F(&data); err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if data.Replace { - /* - if remove { - w.removeCurrent() - return nil - } - */ - - resultVal := reflect.ValueOf(data.ReplaceValue) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. 
- m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) removeCurrent() { - // Append the key to the unknown keys - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - - for i := 1; i <= len(w.cs); i++ { - c := w.cs[len(w.cs)-i] - switch c.Kind() { - case reflect.Map: - // Zero value so that we delete the map key - var val reflect.Value - - // Get the key and delete it - k := w.csData.(reflect.Value) - c.SetMapIndex(k, val) - return - } - } - - panic("No container found for removeCurrent") -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func (w *interpolationWalker) splitSlice() { - // Get the []interface{} slice so we can do some operations on - // it without dealing with reflection. We'll document each step - // here to be clear. - var s []interface{} - raw := w.cs[len(w.cs)-1] - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - default: - panic("Unknown kind: " + raw.Kind().String()) - } - - // Check if we have any elements that we need to split. If not, then - // just return since we're done. - split := false - if !split { - return - } - - // Make a new result slice that is twice the capacity to fit our growth. - result := make([]interface{}, 0, len(s)*2) - - // Go over each element of the original slice and start building up - // the resulting slice by splitting where we have to. - for _, v := range s { - sv, ok := v.(string) - if !ok { - // Not a string, so just set it - result = append(result, v) - continue - } - - // Not a string list, so just set it - result = append(result, sv) - } - - // Our slice is now done, we have to replace the slice now - // with this new one that we have. - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/hashicorp/logutils/.gitignore similarity index 100% rename from vendor/github.com/armon/go-radix/.gitignore rename to vendor/github.com/hashicorp/logutils/.gitignore diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE similarity index 100% rename from vendor/github.com/hashicorp/terraform/LICENSE rename to vendor/github.com/hashicorp/logutils/LICENSE diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md new file mode 100644 index 00000000..49490eae --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/README.md @@ -0,0 +1,36 @@ +# logutils + +logutils is a Go package that augments the standard library "log" package +to make logging a bit more modern, without fragmenting the Go ecosystem +with new logging packages. + +## The simplest thing that could possibly work + +Presumably your application already uses the default `log` package. 
To switch, you'll want your code to look like the following: + +```go +package main + +import ( + "log" + "os" + + "github.com/hashicorp/logutils" +) + +func main() { + filter := &logutils.LevelFilter{ + Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"}, + MinLevel: logutils.LogLevel("WARN"), + Writer: os.Stderr, + } + log.SetOutput(filter) + + log.Print("[DEBUG] Debugging") // this will not print + log.Print("[WARN] Warning") // this will + log.Print("[ERROR] Erring") // and so will this + log.Print("Message I haven't updated") // and so will this +} +``` + +This logs to standard error exactly like go's standard logger. Any log messages you haven't converted to have a level will continue to print as before. diff --git a/vendor/github.com/hashicorp/logutils/go.mod b/vendor/github.com/hashicorp/logutils/go.mod new file mode 100644 index 00000000..ba38a457 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/logutils diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go new file mode 100644 index 00000000..6381bf16 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/level.go @@ -0,0 +1,81 @@ +// Package logutils augments the standard log package with levels. +package logutils + +import ( + "bytes" + "io" + "sync" +) + +type LogLevel string + +// LevelFilter is an io.Writer that can be used with a logger that +// will filter out log messages that aren't at least a certain level. +// +// Once the filter is in use somewhere, it is not safe to modify +// the structure. +type LevelFilter struct { + // Levels is the list of log levels, in increasing order of + // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. + Levels []LogLevel + + // MinLevel is the minimum level allowed through + MinLevel LogLevel + + // The underlying io.Writer where log messages that pass the filter + // will be set. + Writer io.Writer + + badLevels map[LogLevel]struct{} + once sync.Once +} + +// Check will check a given line if it would be included in the level +// filter. +func (f *LevelFilter) Check(line []byte) bool { + f.once.Do(f.init) + + // Check for a log level + var level LogLevel + x := bytes.IndexByte(line, '[') + if x >= 0 { + y := bytes.IndexByte(line[x:], ']') + if y >= 0 { + level = LogLevel(line[x+1 : x+y]) + } + } + + _, ok := f.badLevels[level] + return !ok +} + +func (f *LevelFilter) Write(p []byte) (n int, err error) { + // Note in general that io.Writer can receive any byte sequence + // to write, but the "log" package always guarantees that we only + // get a single line. We use that as a slight optimization within + // this method, assuming we're dealing with a single, complete line + // of log data. + + if !f.Check(p) { + return len(p), nil + } + + return f.Writer.Write(p) +} + +// SetMinLevel is used to update the minimum log level +func (f *LevelFilter) SetMinLevel(min LogLevel) { + f.MinLevel = min + f.init() +} + +func (f *LevelFilter) init() { + badLevels := make(map[LogLevel]struct{}) + for _, level := range f.Levels { + if level == f.MinLevel { + break + } + badLevels[level] = struct{}{} + } + f.badLevels = badLevels +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE deleted file mode 100644 index 82b4de97..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. 
“Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go deleted file mode 100644 index d9d27625..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go +++ /dev/null @@ -1,138 +0,0 @@ -package tfconfig - -import ( - "fmt" - - legacyhclparser "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/v2" -) - -// Diagnostic describes a problem (error or warning) encountered during -// configuration loading. -type Diagnostic struct { - Severity DiagSeverity `json:"severity"` - Summary string `json:"summary"` - Detail string `json:"detail,omitempty"` - - // Pos is not populated for all diagnostics, but when populated should - // indicate a particular line that the described problem relates to. - Pos *SourcePos `json:"pos,omitempty"` -} - -// Diagnostics represents a sequence of diagnostics. This is the type that -// should be returned from a function that might generate diagnostics. -type Diagnostics []Diagnostic - -// HasErrors returns true if there is at least one Diagnostic of severity -// DiagError in the receiever. -// -// If a function returns a Diagnostics without errors then the result can -// be assumed to be complete within the "best effort" constraints of this -// library. If errors are present then the caller may wish to employ more -// caution in relying on the result. -func (diags Diagnostics) HasErrors() bool { - for _, diag := range diags { - if diag.Severity == DiagError { - return true - } - } - return false -} - -func (diags Diagnostics) Error() string { - switch len(diags) { - case 0: - return "no problems" - case 1: - return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail) - default: - return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1) - } -} - -// Err returns an error representing the receiver if the receiver HasErrors, or -// nil otherwise. -// -// The returned error can be type-asserted back to a Diagnostics if needed. 
-func (diags Diagnostics) Err() error { - if diags.HasErrors() { - return diags - } - return nil -} - -// DiagSeverity describes the severity of a Diagnostic. -type DiagSeverity rune - -// DiagError indicates a problem that prevented proper processing of the -// configuration. In the precense of DiagError diagnostics the result is -// likely to be incomplete. -const DiagError DiagSeverity = 'E' - -// DiagWarning indicates a problem that the user may wish to consider but -// that did not prevent proper processing of the configuration. -const DiagWarning DiagSeverity = 'W' - -// MarshalJSON is an implementation of encoding/json.Marshaler -func (s DiagSeverity) MarshalJSON() ([]byte, error) { - switch s { - case DiagError: - return []byte(`"error"`), nil - case DiagWarning: - return []byte(`"warning"`), nil - default: - return []byte(`"invalid"`), nil - } -} - -func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics { - if len(diags) == 0 { - return nil - } - ret := make(Diagnostics, len(diags)) - for i, diag := range diags { - ret[i] = Diagnostic{ - Summary: diag.Summary, - Detail: diag.Detail, - } - switch diag.Severity { - case hcl.DiagError: - ret[i].Severity = DiagError - case hcl.DiagWarning: - ret[i].Severity = DiagWarning - } - if diag.Subject != nil { - pos := sourcePosHCL(*diag.Subject) - ret[i].Pos = &pos - } - } - return ret -} - -func diagnosticsError(err error) Diagnostics { - if err == nil { - return nil - } - - if posErr, ok := err.(*legacyhclparser.PosError); ok { - pos := sourcePosLegacyHCL(posErr.Pos, "") - return Diagnostics{ - Diagnostic{ - Severity: DiagError, - Summary: posErr.Err.Error(), - Pos: &pos, - }, - } - } - - return Diagnostics{ - Diagnostic{ - Severity: DiagError, - Summary: err.Error(), - }, - } -} - -func diagnosticsErrorf(format string, args ...interface{}) Diagnostics { - return diagnosticsError(fmt.Errorf(format, args...)) -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go deleted file mode 100644 index 1604a6e0..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package tfconfig is a helper library that does careful, shallow parsing of -// Terraform modules to provide access to high-level metadata while -// remaining broadly compatible with configurations targeting various -// different Terraform versions. -// -// This packge focuses on describing top-level objects only, and in particular -// does not attempt any sort of processing that would require access to plugins. -// Currently it allows callers to extract high-level information about -// variables, outputs, resource blocks, provider dependencies, and Terraform -// Core dependencies. -// -// This package only works at the level of single modules. A full configuration -// is a tree of potentially several modules, some of which may be references -// to remote packages. There are some basic helpers for traversing calls to -// modules at relative local paths, however. -// -// This package employs a "best effort" parsing strategy, producing as complete -// a result as possible even though the input may not be entirely valid. The -// intended use-case is high-level analysis and indexing of externally-facing -// module characteristics, as opposed to validating or even applying the module. 
-package tfconfig diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go deleted file mode 100644 index a070f76e..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go +++ /dev/null @@ -1,130 +0,0 @@ -package tfconfig - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// LoadModule reads the directory at the given path and attempts to interpret -// it as a Terraform module. -func LoadModule(dir string) (*Module, Diagnostics) { - - // For broad compatibility here we actually have two separate loader - // codepaths. The main one uses the new HCL parser and API and is intended - // for configurations from Terraform 0.12 onwards (though will work for - // many older configurations too), but we'll also fall back on one that - // uses the _old_ HCL implementation so we can deal with some edge-cases - // that are not valid in new HCL. - - module, diags := loadModule(dir) - if diags.HasErrors() { - // Try using the legacy HCL parser and see if we fare better. - legacyModule, legacyDiags := loadModuleLegacyHCL(dir) - if !legacyDiags.HasErrors() { - legacyModule.init(legacyDiags) - return legacyModule, legacyDiags - } - } - - module.init(diags) - return module, diags -} - -// IsModuleDir checks if the given path contains terraform configuration files. -// This allows the caller to decide how to handle directories that do not have tf files. -func IsModuleDir(dir string) bool { - primaryPaths, _ := dirFiles(dir) - if len(primaryPaths) == 0 { - return false - } - return true -} - -func (m *Module) init(diags Diagnostics) { - // Fill in any additional provider requirements that are implied by - // resource configurations, to avoid the caller from needing to apply - // this logic itself. Implied requirements don't have version constraints, - // but we'll make sure the requirement value is still non-nil in this - // case so callers can easily recognize it. - for _, r := range m.ManagedResources { - if _, exists := m.RequiredProviders[r.Provider.Name]; !exists { - m.RequiredProviders[r.Provider.Name] = &ProviderRequirement{} - } - } - for _, r := range m.DataResources { - if _, exists := m.RequiredProviders[r.Provider.Name]; !exists { - m.RequiredProviders[r.Provider.Name] = &ProviderRequirement{} - } - } - - // We redundantly also reference the diagnostics from inside the module - // object, primarily so that we can easily included in JSON-serialized - // versions of the module object. 
- m.Diagnostics = diags -} - -func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) { - infos, err := ioutil.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - var override []string - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || isIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - // We are assuming that any _override files will be logically named, - // and processing the files in alphabetical order. Primaries first, then overrides. - primary = append(primary, override...) - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension. -func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// isIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func isIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go deleted file mode 100644 index f83ac872..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go +++ /dev/null @@ -1,319 +0,0 @@ -package tfconfig - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclparse" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -func loadModule(dir string) (*Module, Diagnostics) { - mod := newModule(dir) - primaryPaths, diags := dirFiles(dir) - - parser := hclparse.NewParser() - - for _, filename := range primaryPaths { - var file *hcl.File - var fileDiags hcl.Diagnostics - if strings.HasSuffix(filename, ".json") { - file, fileDiags = parser.ParseJSONFile(filename) - } else { - file, fileDiags = parser.ParseHCLFile(filename) - } - diags = append(diags, fileDiags...) - if file == nil { - continue - } - - content, _, contentDiags := file.Body.PartialContent(rootSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema) - diags = append(diags, contentDiags...) - - if attr, defined := content.Attributes["required_version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) 
- if !valDiags.HasErrors() { - mod.RequiredCore = append(mod.RequiredCore, version) - } - } - - for _, innerBlock := range content.Blocks { - switch innerBlock.Type { - case "required_providers": - reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) - diags = append(diags, reqsDiags...) - for name, req := range reqs { - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = req - } else { - mod.RequiredProviders[name].VersionConstraints = append(mod.RequiredProviders[name].VersionConstraints, req.VersionConstraints...) - } - } - } - } - - case "variable": - content, _, contentDiags := block.Body.PartialContent(variableSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - v := &Variable{ - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - mod.Variables[name] = v - - if attr, defined := content.Attributes["type"]; defined { - // We handle this particular attribute in a somewhat-tricky way: - // since Terraform may evolve its type expression syntax in - // future versions, we don't want to be overly-strict in how - // we handle it here, and so we'll instead just take the raw - // source provided by the user, using the source location - // information in the expression object. - // - // However, older versions of Terraform expected the type - // to be a string containing a keyword, so we'll need to - // handle that as a special case first for backward compatibility. - - var typeExpr string - - var typeExprAsStr string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr) - if !valDiags.HasErrors() { - typeExpr = typeExprAsStr - } else { - - rng := attr.Expr.Range() - sourceFilename := rng.Filename - source, exists := parser.Sources()[sourceFilename] - if exists { - typeExpr = string(rng.SliceBytes(source)) - } else { - // This should never happen, so we'll just warn about it and leave the type unspecified. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Source code not available", - Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name), - Subject: &block.DefRange, - }) - typeExpr = "" - } - - } - - v.Type = typeExpr - } - - if attr, defined := content.Attributes["description"]; defined { - var description string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) - diags = append(diags, valDiags...) - v.Description = description - } - - if attr, defined := content.Attributes["default"]; defined { - // To avoid the caller needing to deal with cty here, we'll - // use its JSON encoding to convert into an - // approximately-equivalent plain Go interface{} value - // to return. - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - if val.IsWhollyKnown() { // should only be false if there are errors in the input - valJSON, err := ctyjson.Marshal(val, val.Type()) - if err != nil { - // Should never happen, since all possible known - // values have a JSON mapping. - panic(fmt.Errorf("failed to serialize default value as JSON: %s", err)) - } - var def interface{} - err = json.Unmarshal(valJSON, &def) - if err != nil { - // Again should never happen, because valJSON is - // guaranteed valid by ctyjson.Marshal. - panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err)) - } - v.Default = def - } - } - - case "output": - - content, _, contentDiags := block.Body.PartialContent(outputSchema) - diags = append(diags, contentDiags...) 
- - name := block.Labels[0] - o := &Output{ - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - mod.Outputs[name] = o - - if attr, defined := content.Attributes["description"]; defined { - var description string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) - diags = append(diags, valDiags...) - o.Description = description - } - - case "provider": - - content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) - diags = append(diags, contentDiags...) - - name := block.Labels[0] - // Even if there isn't an explicit version required, we still - // need an entry in our map to signal the unversioned dependency. - if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = &ProviderRequirement{} - } - if attr, defined := content.Attributes["version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - if !valDiags.HasErrors() { - mod.RequiredProviders[name].VersionConstraints = append(mod.RequiredProviders[name].VersionConstraints, version) - } - } - - case "resource", "data": - - content, _, contentDiags := block.Body.PartialContent(resourceSchema) - diags = append(diags, contentDiags...) - - typeName := block.Labels[0] - name := block.Labels[1] - - r := &Resource{ - Type: typeName, - Name: name, - Pos: sourcePosHCL(block.DefRange), - } - - var resourcesMap map[string]*Resource - - switch block.Type { - case "resource": - r.Mode = ManagedResourceMode - resourcesMap = mod.ManagedResources - case "data": - r.Mode = DataResourceMode - resourcesMap = mod.DataResources - } - - key := r.MapKey() - - resourcesMap[key] = r - - if attr, defined := content.Attributes["provider"]; defined { - // New style here is to provide this as a naked traversal - // expression, but we also support quoted references for - // older configurations that predated this convention. - traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) - if travDiags.HasErrors() { - traversal = nil // in case we got any partial results - - // Fall back on trying to parse as a string - var travStr string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) - if !valDiags.HasErrors() { - var strDiags hcl.Diagnostics - traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) - if strDiags.HasErrors() { - traversal = nil - } - } - } - - // If we get out here with a nil traversal then we didn't - // succeed in processing the input. - if len(traversal) > 0 { - providerName := traversal.RootName() - alias := "" - if len(traversal) > 1 { - if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { - alias = getAttr.Name - } - } - r.Provider = ProviderRef{ - Name: providerName, - Alias: alias, - } - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider reference", - Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", - Subject: attr.Expr.Range().Ptr(), - }) - } - } else { - // If provider _isn't_ set then we'll infer it from the - // resource type. - r.Provider = ProviderRef{ - Name: resourceTypeDefaultProviderName(r.Type), - } - } - - case "module": - - content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) - diags = append(diags, contentDiags...) 
- - name := block.Labels[0] - mc := &ModuleCall{ - Name: block.Labels[0], - Pos: sourcePosHCL(block.DefRange), - } - - // check if this is overriding an existing module - var origSource string - if origMod, exists := mod.ModuleCalls[name]; exists { - origSource = origMod.Source - } - - mod.ModuleCalls[name] = mc - - if attr, defined := content.Attributes["source"]; defined { - var source string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) - diags = append(diags, valDiags...) - mc.Source = source - } - - if mc.Source == "" { - mc.Source = origSource - } - - if attr, defined := content.Attributes["version"]; defined { - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) - mc.Version = version - } - - default: - // Should never happen because our cases above should be - // exhaustive for our schema. - panic(fmt.Errorf("unhandled block type %q", block.Type)) - } - } - } - - return mod, diagnosticsHCL(diags) -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go deleted file mode 100644 index c79b033b..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go +++ /dev/null @@ -1,323 +0,0 @@ -package tfconfig - -import ( - "io/ioutil" - "strings" - - legacyhcl "github.com/hashicorp/hcl" - legacyast "github.com/hashicorp/hcl/hcl/ast" -) - -func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { - // This implementation is intentionally more quick-and-dirty than the - // main loader. In particular, it doesn't bother to keep careful track - // of multiple error messages because we always fall back on returning - // the main parser's error message if our fallback parsing produces - // an error, and thus the errors here are not seen by the end-caller. 
- mod := newModule(dir) - - primaryPaths, diags := dirFiles(dir) - if diags.HasErrors() { - return mod, diagnosticsHCL(diags) - } - - for _, filename := range primaryPaths { - src, err := ioutil.ReadFile(filename) - if err != nil { - return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) - } - - hclRoot, err := legacyhcl.Parse(string(src)) - if err != nil { - return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) - } - - list, ok := hclRoot.Node.(*legacyast.ObjectList) - if !ok { - return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) - } - - for _, item := range list.Filter("terraform").Items { - if len(item.Keys) > 0 { - item = &legacyast.ObjectItem{ - Val: &legacyast.ObjectType{ - List: &legacyast.ObjectList{ - Items: []*legacyast.ObjectItem{item}, - }, - }, - } - } - - type TerraformBlock struct { - RequiredVersion string `hcl:"required_version"` - } - var block TerraformBlock - err = legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("terraform block: %s", err) - } - - if block.RequiredVersion != "" { - mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) - } - } - - if vars := list.Filter("variable"); len(vars.Items) > 0 { - vars = vars.Children() - type VariableBlock struct { - Type string `hcl:"type"` - Default interface{} - Description string - Fields []string `hcl:",decodedFields"` - } - - for _, item := range vars.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block VariableBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) - } - - // Clean up legacy HCL decoding ambiguity by unwrapping list of maps - if ms, ok := block.Default.([]map[string]interface{}); ok { - def := make(map[string]interface{}) - for _, m := range ms { - for k, v := range m { - def[k] = v - } - } - block.Default = def - } - - v := &Variable{ - Name: name, - Type: block.Type, - Description: block.Description, - Default: block.Default, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - if _, exists := mod.Variables[name]; exists { - return nil, diagnosticsErrorf("duplicate variable block for %q", name) - } - mod.Variables[name] = v - - } - } - - if outputs := list.Filter("output"); len(outputs.Items) > 0 { - outputs = outputs.Children() - type OutputBlock struct { - Description string - } - - for _, item := range outputs.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block OutputBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) - } - - o := &Output{ - Name: name, - Description: block.Description, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - if _, exists := mod.Outputs[name]; exists { - return nil, diagnosticsErrorf("duplicate output block for %q", name) - } - mod.Outputs[name] = o - } - } - - for _, blockType := range []string{"resource", "data"} { - if resources := list.Filter(blockType); len(resources.Items) > 0 { - resources = resources.Children() - type ResourceBlock struct { - Provider string - } - - for _, item := range resources.Items 
{ - unwrapLegacyHCLObjectKeysFromJSON(item, 2) - - if len(item.Keys) != 2 { - return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) - } - - typeName := item.Keys[0].Token.Value().(string) - name := item.Keys[1].Token.Value().(string) - var mode ResourceMode - var rMap map[string]*Resource - switch blockType { - case "resource": - mode = ManagedResourceMode - rMap = mod.ManagedResources - case "data": - mode = DataResourceMode - rMap = mod.DataResources - } - - var block ResourceBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) - } - - var providerName, providerAlias string - if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { - providerName = block.Provider[:dotPos] - providerAlias = block.Provider[dotPos+1:] - } else { - providerName = block.Provider - } - if providerName == "" { - providerName = resourceTypeDefaultProviderName(typeName) - } - - r := &Resource{ - Mode: mode, - Type: typeName, - Name: name, - Provider: ProviderRef{ - Name: providerName, - Alias: providerAlias, - }, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - key := r.MapKey() - if _, exists := rMap[key]; exists { - return nil, diagnosticsErrorf("duplicate resource block for %q", key) - } - rMap[key] = r - } - } - - } - - if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { - moduleCalls = moduleCalls.Children() - type ModuleBlock struct { - Source string - Version string - } - - for _, item := range moduleCalls.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block ModuleBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) - } - - mc := &ModuleCall{ - Name: name, - Source: block.Source, - Version: block.Version, - Pos: sourcePosLegacyHCL(item.Pos(), filename), - } - // it's possible this module call is from an override file - if origMod, exists := mod.ModuleCalls[name]; exists { - if mc.Source == "" { - mc.Source = origMod.Source - } - } - mod.ModuleCalls[name] = mc - } - } - - if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { - providerConfigs = providerConfigs.Children() - type ProviderBlock struct { - Version string - } - - for _, item := range providerConfigs.Items { - unwrapLegacyHCLObjectKeysFromJSON(item, 1) - - if len(item.Keys) != 1 { - return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) - } - - name := item.Keys[0].Token.Value().(string) - - var block ProviderBlock - err := legacyhcl.DecodeObject(&block, item.Val) - if err != nil { - return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) - } - // Even if there wasn't an explicit version required, we still - // need an entry in our map to signal the unversioned dependency. 
- if _, exists := mod.RequiredProviders[name]; !exists { - mod.RequiredProviders[name] = &ProviderRequirement{} - } - - if block.Version != "" { - mod.RequiredProviders[name].VersionConstraints = append(mod.RequiredProviders[name].VersionConstraints, block.Version) - } - } - } - } - - return mod, nil -} - -// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when -// parsing JSON as input: if we're parsing JSON then directly nested -// items will show up as additional "keys". -// -// For objects that expect a fixed number of keys, this breaks the -// decoding process. This function unwraps the object into what it would've -// looked like if it came directly from HCL by specifying the number of keys -// you expect. -// -// Example: -// -// { "foo": { "baz": {} } } -// -// Will show up with Keys being: []string{"foo", "baz"} -// when we really just want the first two. This function will fix this. -func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { - if len(item.Keys) > depth && item.Keys[0].Token.JSON { - for len(item.Keys) > depth { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &legacyast.ObjectType{ - List: &legacyast.ObjectList{ - Items: []*legacyast.ObjectItem{ - &legacyast.ObjectItem{ - Keys: []*legacyast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go deleted file mode 100644 index 63027d18..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfconfig - -// Module is the top-level type representing a parsed and processed Terraform -// module. -type Module struct { - // Path is the local filesystem directory where the module was loaded from. - Path string `json:"path"` - - Variables map[string]*Variable `json:"variables"` - Outputs map[string]*Output `json:"outputs"` - - RequiredCore []string `json:"required_core,omitempty"` - RequiredProviders map[string]*ProviderRequirement `json:"required_providers"` - - ManagedResources map[string]*Resource `json:"managed_resources"` - DataResources map[string]*Resource `json:"data_resources"` - ModuleCalls map[string]*ModuleCall `json:"module_calls"` - - // Diagnostics records any errors and warnings that were detected during - // loading, primarily for inclusion in serialized forms of the module - // since this slice is also returned as a second argument from LoadModule. - Diagnostics Diagnostics `json:"diagnostics,omitempty"` -} - -func newModule(path string) *Module { - return &Module{ - Path: path, - Variables: make(map[string]*Variable), - Outputs: make(map[string]*Output), - RequiredProviders: make(map[string]*ProviderRequirement), - ManagedResources: make(map[string]*Resource), - DataResources: make(map[string]*Resource), - ModuleCalls: make(map[string]*ModuleCall), - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go deleted file mode 100644 index 5e1e05a7..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go +++ /dev/null @@ -1,11 +0,0 @@ -package tfconfig - -// ModuleCall represents a "module" block within a module. 
That is, a -// declaration of a child module from inside its parent. -type ModuleCall struct { - Name string `json:"name"` - Source string `json:"source"` - Version string `json:"version,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go deleted file mode 100644 index 890b25e6..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go +++ /dev/null @@ -1,9 +0,0 @@ -package tfconfig - -// Output represents a single output from a Terraform module. -type Output struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go deleted file mode 100644 index 157c8c2c..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go +++ /dev/null @@ -1,85 +0,0 @@ -package tfconfig - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/zclconf/go-cty/cty/gocty" -) - -// ProviderRef is a reference to a provider configuration within a module. -// It represents the contents of a "provider" argument in a resource, or -// a value in the "providers" map for a module call. -type ProviderRef struct { - Name string `json:"name"` - Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced -} - -type ProviderRequirement struct { - Source string `json:"source,omitempty"` - VersionConstraints []string `json:"version_constraints,omitempty"` -} - -func decodeRequiredProvidersBlock(block *hcl.Block) (map[string]*ProviderRequirement, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - reqs := make(map[string]*ProviderRequirement) - for name, attr := range attrs { - expr, err := attr.Expr.Value(nil) - if err != nil { - diags = append(diags, err...) - } - - switch { - case expr.Type().IsPrimitiveType(): - var version string - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) - diags = append(diags, valDiags...) 
- if !valDiags.HasErrors() { - reqs[name] = &ProviderRequirement{ - VersionConstraints: []string{version}, - } - } - - case expr.Type().IsObjectType(): - var pr ProviderRequirement - if expr.Type().HasAttribute("version") { - var version string - err := gocty.FromCtyValue(expr.GetAttr("version"), &version) - if err == nil { - pr.VersionConstraints = append(pr.VersionConstraints, version) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: "Unsuitable value: string required", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - if expr.Type().HasAttribute("source") { - var source string - err := gocty.FromCtyValue(expr.GetAttr("source"), &source) - if err == nil { - pr.Source = source - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: "Unsuitable value: string required", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - reqs[name] = &pr - - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsuitable value type", - Detail: "Unsuitable value: string required", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - return reqs, diags -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go deleted file mode 100644 index 401c8fce..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go +++ /dev/null @@ -1,64 +0,0 @@ -package tfconfig - -import ( - "fmt" - "strconv" - "strings" -) - -// Resource represents a single "resource" or "data" block within a module. -type Resource struct { - Mode ResourceMode `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - - Provider ProviderRef `json:"provider"` - - Pos SourcePos `json:"pos"` -} - -// MapKey returns a string that can be used to uniquely identify the receiver -// in a map[string]*Resource. -func (r *Resource) MapKey() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - // should never happen - return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) - } -} - -// ResourceMode represents the "mode" of a resource, which is used to -// distinguish between managed resources ("resource" blocks in config) and -// data resources ("data" blocks in config). -type ResourceMode rune - -const InvalidResourceMode ResourceMode = 0 -const ManagedResourceMode ResourceMode = 'M' -const DataResourceMode ResourceMode = 'D' - -func (m ResourceMode) String() string { - switch m { - case ManagedResourceMode: - return "managed" - case DataResourceMode: - return "data" - default: - return "" - } -} - -// MarshalJSON implements encoding/json.Marshaler. 
-func (m ResourceMode) MarshalJSON() ([]byte, error) { - return []byte(strconv.Quote(m.String())), nil -} - -func resourceTypeDefaultProviderName(typeName string) string { - if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { - return typeName[:underPos] - } - return typeName -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go deleted file mode 100644 index fd6ca9e7..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go +++ /dev/null @@ -1,106 +0,0 @@ -package tfconfig - -import ( - "github.com/hashicorp/hcl/v2" -) - -var rootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - LabelNames: nil, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - }, -} - -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "required_providers", - }, - }, -} - -var providerConfigSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "version", - }, - { - Name: "alias", - }, - }, -} - -var variableSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "type", - }, - { - Name: "description", - }, - { - Name: "default", - }, - }, -} - -var outputSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - }, -} - -var moduleCallSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - }, - { - Name: "version", - }, - { - Name: "providers", - }, - }, -} - -var resourceSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "provider", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go deleted file mode 100644 index 548c9f9a..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go +++ /dev/null @@ -1,50 +0,0 @@ -package tfconfig - -import ( - legacyhcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/v2" -) - -// SourcePos is a pointer to a particular location in a source file. -// -// This type is embedded into other structs to allow callers to locate the -// definition of each described module element. The SourcePos of an element -// is usually the first line of its definition, although the definition can -// be a little "fuzzy" with JSON-based config files. -type SourcePos struct { - Filename string `json:"filename"` - Line int `json:"line"` -} - -func sourcePos(filename string, line int) SourcePos { - return SourcePos{ - Filename: filename, - Line: line, - } -} - -func sourcePosHCL(rng hcl.Range) SourcePos { - // We intentionally throw away the column information here because - // current and legacy HCL both disagree on the definition of a column - // and so a line-only reference is the best granularity we can do - // such that the result is consistent between both parsers. 
- return SourcePos{ - Filename: rng.Filename, - Line: rng.Start.Line, - } -} - -func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { - useFilename := pos.Filename - // We'll try to use the filename given in legacy HCL position, but - // in practice there's no way to actually get this populated via - // the HCL API so it's usually empty except in some specialized - // situations, such as positions in error objects. - if useFilename == "" { - useFilename = filename - } - return SourcePos{ - Filename: useFilename, - Line: pos.Line, - } -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go deleted file mode 100644 index 0f73fc99..00000000 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go +++ /dev/null @@ -1,16 +0,0 @@ -package tfconfig - -// Variable represents a single variable from a Terraform module. -type Variable struct { - Name string `json:"name"` - Type string `json:"type,omitempty"` - Description string `json:"description,omitempty"` - - // Default is an approximate representation of the default value in - // the native Go type system. The conversion from the value given in - // configuration may be slightly lossy. Only values that can be - // serialized by json.Marshal will be included here. - Default interface{} `json:"default,omitempty"` - - Pos SourcePos `json:"pos"` -} diff --git a/vendor/github.com/hashicorp/terraform-exec/LICENSE b/vendor/github.com/hashicorp/terraform-exec/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go new file mode 100644 index 00000000..4e567eea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -0,0 +1,9 @@ +package version + +const version = "0.13.0" + +// ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. +// This is a function to allow for future possible enhancement using debug.BuildInfo. +func ModuleVersion() string { + return version +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go new file mode 100644 index 00000000..82d09d5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go @@ -0,0 +1,155 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type applyConfig struct { + backup string + dirOrPlan string + lock bool + + // LockTimeout must be a string with time unit, e.g. '10s' + lockTimeout string + parallelism int + reattachInfo ReattachInfo + refresh bool + state string + stateOut string + targets []string + + // Vars: each var must be supplied as a single string, e.g. 'foo=bar' + vars []string + varFiles []string +} + +var defaultApplyOptions = applyConfig{ + lock: true, + parallelism: 10, + refresh: true, +} + +// ApplyOption represents options used in the Apply method. 
+type ApplyOption interface { + configureApply(*applyConfig) +} + +func (opt *ParallelismOption) configureApply(conf *applyConfig) { + conf.parallelism = opt.parallelism +} + +func (opt *BackupOption) configureApply(conf *applyConfig) { + conf.backup = opt.path +} + +func (opt *TargetOption) configureApply(conf *applyConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *LockTimeoutOption) configureApply(conf *applyConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureApply(conf *applyConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureApply(conf *applyConfig) { + conf.stateOut = opt.path +} + +func (opt *VarFileOption) configureApply(conf *applyConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +func (opt *LockOption) configureApply(conf *applyConfig) { + conf.lock = opt.lock +} + +func (opt *RefreshOption) configureApply(conf *applyConfig) { + conf.refresh = opt.refresh +} + +func (opt *VarOption) configureApply(conf *applyConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *DirOrPlanOption) configureApply(conf *applyConfig) { + conf.dirOrPlan = opt.path +} + +func (opt *ReattachOption) configureApply(conf *applyConfig) { + conf.reattachInfo = opt.info +} + +// Apply represents the terraform apply subcommand. +func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { + cmd, err := tf.applyCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { + c := defaultApplyOptions + + for _, o := range opts { + o.configureApply(&c) + } + + args := []string{"apply", "-no-color", "-auto-approve", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) + args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // string argument: pass if set + if c.dirOrPlan != "" { + args = append(args, c.dirOrPlan) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go new file mode 100644 index 00000000..e792dc9c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go @@ -0,0 +1,232 @@ +package tfexec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "strings" + + "github.com/hashicorp/terraform-exec/internal/version" +) + +const ( + checkpointDisableEnvVar = 
"CHECKPOINT_DISABLE" + cliArgsEnvVar = "TF_CLI_ARGS" + logEnvVar = "TF_LOG" + inputEnvVar = "TF_INPUT" + automationEnvVar = "TF_IN_AUTOMATION" + logPathEnvVar = "TF_LOG_PATH" + reattachEnvVar = "TF_REATTACH_PROVIDERS" + appendUserAgentEnvVar = "TF_APPEND_USER_AGENT" + workspaceEnvVar = "TF_WORKSPACE" + disablePluginTLSEnvVar = "TF_DISABLE_PLUGIN_TLS" + skipProviderVerifyEnvVar = "TF_SKIP_PROVIDER_VERIFY" + + varEnvVarPrefix = "TF_VAR_" + cliArgEnvVarPrefix = "TF_CLI_ARGS_" +) + +var prohibitedEnvVars = []string{ + cliArgsEnvVar, + inputEnvVar, + automationEnvVar, + logPathEnvVar, + logEnvVar, + reattachEnvVar, + appendUserAgentEnvVar, + workspaceEnvVar, + disablePluginTLSEnvVar, + skipProviderVerifyEnvVar, +} + +var prohibitedEnvVarPrefixes = []string{ + varEnvVarPrefix, + cliArgEnvVarPrefix, +} + +func manualEnvVars(env map[string]string, cb func(k string)) { + for k := range env { + for _, p := range prohibitedEnvVars { + if p == k { + cb(k) + goto NextEnvVar + } + } + for _, prefix := range prohibitedEnvVarPrefixes { + if strings.HasPrefix(k, prefix) { + cb(k) + goto NextEnvVar + } + } + NextEnvVar: + } +} + +// ProhibitedEnv returns a slice of environment variable keys that are not allowed +// to be set manually from the passed environment. +func ProhibitedEnv(env map[string]string) []string { + var p []string + manualEnvVars(env, func(k string) { + p = append(p, k) + }) + return p +} + +// CleanEnv removes any prohibited environment variables from an environment map. +func CleanEnv(dirty map[string]string) map[string]string { + clean := dirty + manualEnvVars(clean, func(k string) { + delete(clean, k) + }) + return clean +} + +func envMap(environ []string) map[string]string { + env := map[string]string{} + for _, ev := range environ { + parts := strings.SplitN(ev, "=", 2) + if len(parts) == 0 { + continue + } + k := parts[0] + v := "" + if len(parts) == 2 { + v = parts[1] + } + env[k] = v + } + return env +} + +func envSlice(environ map[string]string) []string { + env := []string{} + for k, v := range environ { + env = append(env, k+"="+v) + } + return env +} + +func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { + // set Terraform level env, if env is nil, fall back to os.Environ + var env map[string]string + if tf.env == nil { + env = envMap(os.Environ()) + } else { + env = make(map[string]string, len(tf.env)) + for k, v := range tf.env { + env[k] = v + } + } + + // override env with any command specific environment + for k, v := range mergeEnv { + env[k] = v + } + + // always propagate CHECKPOINT_DISABLE env var unless it is + // explicitly overridden with tf.SetEnv or command env + if _, ok := env[checkpointDisableEnvVar]; !ok { + env[checkpointDisableEnvVar] = os.Getenv(checkpointDisableEnvVar) + } + + // always override user agent + ua := mergeUserAgent( + os.Getenv(appendUserAgentEnvVar), + tf.appendUserAgent, + fmt.Sprintf("HashiCorp-terraform-exec/%s", version.ModuleVersion()), + ) + env[appendUserAgentEnvVar] = ua + + // always override logging + if tf.logPath == "" { + // so logging can't pollute our stderr output + env[logEnvVar] = "" + env[logPathEnvVar] = "" + } else { + env[logPathEnvVar] = tf.logPath + // Log levels other than TRACE are currently unreliable, the CLI recommends using TRACE only. 
+ env[logEnvVar] = "TRACE" + } + + // constant automation override env vars + env[automationEnvVar] = "1" + + // force usage of workspace methods for switching + env[workspaceEnvVar] = "" + + if tf.disablePluginTLS { + env[disablePluginTLSEnvVar] = "1" + } + + if tf.skipProviderVerify { + env[skipProviderVerifyEnvVar] = "1" + } + + return envSlice(env) +} + +func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string]string, args ...string) *exec.Cmd { + cmd := exec.Command(tf.execPath, args...) + + cmd.Env = tf.buildEnv(mergeEnv) + cmd.Dir = tf.workingDir + + tf.logger.Printf("[INFO] running Terraform command: %s", cmdString(cmd)) + + return cmd +} + +func (tf *Terraform) runTerraformCmdJSON(ctx context.Context, cmd *exec.Cmd, v interface{}) error { + var outbuf = bytes.Buffer{} + cmd.Stdout = mergeWriters(cmd.Stdout, &outbuf) + + err := tf.runTerraformCmd(ctx, cmd) + if err != nil { + return err + } + + dec := json.NewDecoder(&outbuf) + dec.UseNumber() + return dec.Decode(v) +} + +// mergeUserAgent does some minor deduplication to ensure we aren't +// just using the same append string over and over. +func mergeUserAgent(uas ...string) string { + included := map[string]bool{} + merged := []string{} + for _, ua := range uas { + ua = strings.TrimSpace(ua) + + if ua == "" { + continue + } + if included[ua] { + continue + } + included[ua] = true + merged = append(merged, ua) + } + return strings.Join(merged, " ") +} + +func mergeWriters(writers ...io.Writer) io.Writer { + compact := []io.Writer{} + for _, w := range writers { + if w != nil { + compact = append(compact, w) + } + } + if len(compact) == 0 { + return ioutil.Discard + } + if len(compact) == 1 { + return compact[0] + } + return io.MultiWriter(compact...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go new file mode 100644 index 00000000..2e88dd3e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go @@ -0,0 +1,45 @@ +// +build !linux + +package tfexec + +import ( + "context" + "os/exec" + "strings" +) + +func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { + var errBuf strings.Builder + + cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) + cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) + + go func() { + <-ctx.Done() + if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { + if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { + err := cmd.Process.Kill() + if err != nil { + tf.logger.Printf("error from kill: %s", err) + } + } + } + }() + + // check for early cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := cmd.Run() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, errBuf.String()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go new file mode 100644 index 00000000..7cbdcb96 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -0,0 +1,54 @@ +package tfexec + +import ( + "context" + "os/exec" + "strings" + "syscall" +) + +func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { + var errBuf strings.Builder + + cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) + cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) + + cmd.SysProcAttr = 
&syscall.SysProcAttr{ + // kill children if parent is dead + Pdeathsig: syscall.SIGKILL, + // set process group ID + Setpgid: true, + } + + go func() { + <-ctx.Done() + if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { + if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { + // send SIGINT to process group + err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) + if err != nil { + tf.logger.Printf("error from SIGINT: %s", err) + } + } + + // TODO: send a kill if it doesn't respond for a bit? + } + }() + + // check for early cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := cmd.Run() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, errBuf.String()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go new file mode 100644 index 00000000..4f81d114 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring.go @@ -0,0 +1,12 @@ +// +build go1.13 + +package tfexec + +import ( + "os/exec" +) + +// cmdString handles go 1.12 as stringer was only added to exec.Cmd in 1.13 +func cmdString(c *exec.Cmd) string { + return c.String() +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go new file mode 100644 index 00000000..75614dbf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmdstring_go112.go @@ -0,0 +1,19 @@ +// +build !go1.13 + +package tfexec + +import ( + "os/exec" + "strings" +) + +// cmdString handles go 1.12 as stringer was only added to exec.Cmd in 1.13 +func cmdString(c *exec.Cmd) string { + b := new(strings.Builder) + b.WriteString(c.Path) + for _, a := range c.Args[1:] { + b.WriteByte(' ') + b.WriteString(a) + } + return b.String() +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go new file mode 100644 index 00000000..8011c0ba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go @@ -0,0 +1,156 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type destroyConfig struct { + backup string + dir string + lock bool + + // LockTimeout must be a string with time unit, e.g. '10s' + lockTimeout string + parallelism int + reattachInfo ReattachInfo + refresh bool + state string + stateOut string + targets []string + + // Vars: each var must be supplied as a single string, e.g. 'foo=bar' + vars []string + varFiles []string +} + +var defaultDestroyOptions = destroyConfig{ + lock: true, + lockTimeout: "0s", + parallelism: 10, + refresh: true, +} + +// DestroyOption represents options used in the Destroy method. 
+type DestroyOption interface { + configureDestroy(*destroyConfig) +} + +func (opt *DirOption) configureDestroy(conf *destroyConfig) { + conf.dir = opt.path +} + +func (opt *ParallelismOption) configureDestroy(conf *destroyConfig) { + conf.parallelism = opt.parallelism +} + +func (opt *BackupOption) configureDestroy(conf *destroyConfig) { + conf.backup = opt.path +} + +func (opt *TargetOption) configureDestroy(conf *destroyConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *LockTimeoutOption) configureDestroy(conf *destroyConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureDestroy(conf *destroyConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureDestroy(conf *destroyConfig) { + conf.stateOut = opt.path +} + +func (opt *VarFileOption) configureDestroy(conf *destroyConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +func (opt *LockOption) configureDestroy(conf *destroyConfig) { + conf.lock = opt.lock +} + +func (opt *RefreshOption) configureDestroy(conf *destroyConfig) { + conf.refresh = opt.refresh +} + +func (opt *VarOption) configureDestroy(conf *destroyConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *ReattachOption) configureDestroy(conf *destroyConfig) { + conf.reattachInfo = opt.info +} + +// Destroy represents the terraform destroy subcommand. +func (tf *Terraform) Destroy(ctx context.Context, opts ...DestroyOption) error { + cmd, err := tf.destroyCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { + c := defaultDestroyOptions + + for _, o := range opts { + o.configureDestroy(&c) + } + + args := []string{"destroy", "-no-color", "-auto-approve", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) + args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go new file mode 100644 index 00000000..0e82bbd9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go @@ -0,0 +1,4 @@ +// Package tfexec exposes functionality for constructing and running Terraform +// CLI commands. 
Structured return values use the data types defined in the +// github.com/hashicorp/terraform-json package. +package tfexec diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go new file mode 100644 index 00000000..7a32ef2f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go @@ -0,0 +1,39 @@ +package tfexec + +import "fmt" + +// this file contains non-parsed exported errors + +type ErrNoSuitableBinary struct { + err error +} + +func (e *ErrNoSuitableBinary) Error() string { + return fmt.Sprintf("no suitable terraform binary could be found: %s", e.err.Error()) +} + +func (e *ErrNoSuitableBinary) Unwrap() error { + return e.err +} + +// ErrVersionMismatch is returned when the detected Terraform version is not compatible with the +// command or flags being used in this invocation. +type ErrVersionMismatch struct { + MinInclusive string + MaxExclusive string + Actual string +} + +func (e *ErrVersionMismatch) Error() string { + return fmt.Sprintf("unexpected version %s (min: %s, max: %s)", e.Actual, e.MinInclusive, e.MaxExclusive) +} + +// ErrManualEnvVar is returned when an env var that should be set programatically via an option or method +// is set via the manual environment passing functions. +type ErrManualEnvVar struct { + Name string +} + +func (err *ErrManualEnvVar) Error() string { + return fmt.Sprintf("manual setting of env var %q detected", err.Name) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go new file mode 100644 index 00000000..5596fa2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go @@ -0,0 +1,247 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "strings" +) + +// this file contains errors parsed from stderr + +var ( + // The "Required variable not set:" case is for 0.11 + missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) + missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) + + usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) + + // "Could not load plugin" is present in 0.13 + noInitErrRegexp = regexp.MustCompile(`Error: Could not satisfy plugin requirements|Error: Could not load plugin`) + + noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) + + workspaceDoesNotExistRegexp = regexp.MustCompile(`Workspace "(.+)" doesn't exist.`) + + workspaceAlreadyExistsRegexp = regexp.MustCompile(`Workspace "(.+)" already exists`) + + tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported Terraform Core version`) + tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) + configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) +) + +func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string) error { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // not an exit error, short circuit, nothing to wrap + return err + } + + ctxErr := ctx.Err() + + // nothing to parse, return early + errString := strings.TrimSpace(stderr) + if errString == "" { + return 
&unwrapper{exitErr, ctxErr} + } + + switch { + case tfVersionMismatchErrRegexp.MatchString(stderr): + constraint := "" + constraints := tfVersionMismatchConstraintRegexp.FindStringSubmatch(stderr) + for i := 1; i < len(constraints); i++ { + constraint = strings.TrimSpace(constraints[i]) + if constraint != "" { + break + } + } + + if constraint == "" { + // hardcode a value here for weird cases (incl. 0.12) + constraint = "unknown" + } + + // only set this if it happened to be cached already + ver := "" + if tf != nil && tf.execVersion != nil { + ver = tf.execVersion.String() + } + + return &ErrTFVersionMismatch{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Constraint: constraint, + TFVersion: ver, + } + case missingVarErrRegexp.MatchString(stderr): + name := "" + names := missingVarNameRegexp.FindStringSubmatch(stderr) + for i := 1; i < len(names); i++ { + name = strings.TrimSpace(names[i]) + if name != "" { + break + } + } + + return &ErrMissingVar{ + unwrapper: unwrapper{exitErr, ctxErr}, + + VariableName: name, + } + case usageRegexp.MatchString(stderr): + return &ErrCLIUsage{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case noInitErrRegexp.MatchString(stderr): + return &ErrNoInit{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case noConfigErrRegexp.MatchString(stderr): + return &ErrNoConfig{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case workspaceDoesNotExistRegexp.MatchString(stderr): + submatches := workspaceDoesNotExistRegexp.FindStringSubmatch(stderr) + if len(submatches) == 2 { + return &ErrNoWorkspace{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Name: submatches[1], + } + } + case workspaceAlreadyExistsRegexp.MatchString(stderr): + submatches := workspaceAlreadyExistsRegexp.FindStringSubmatch(stderr) + if len(submatches) == 2 { + return &ErrWorkspaceExists{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Name: submatches[1], + } + } + case configInvalidErrRegexp.MatchString(stderr): + return &ErrConfigInvalid{stderr: stderr} + } + + return fmt.Errorf("%w\n%s", &unwrapper{exitErr, ctxErr}, stderr) +} + +type unwrapper struct { + err error + ctxErr error +} + +func (u *unwrapper) Unwrap() error { + return u.err +} + +func (u *unwrapper) Is(target error) bool { + switch target { + case context.DeadlineExceeded, context.Canceled: + return u.ctxErr == context.DeadlineExceeded || + u.ctxErr == context.Canceled + } + return false +} + +func (u *unwrapper) Error() string { + return u.err.Error() +} + +type ErrConfigInvalid struct { + stderr string +} + +func (e *ErrConfigInvalid) Error() string { + return "configuration is invalid" +} + +type ErrMissingVar struct { + unwrapper + + VariableName string +} + +func (err *ErrMissingVar) Error() string { + return fmt.Sprintf("variable %q was required but not supplied", err.VariableName) +} + +type ErrNoWorkspace struct { + unwrapper + + Name string +} + +func (err *ErrNoWorkspace) Error() string { + return fmt.Sprintf("workspace %q does not exist", err.Name) +} + +// ErrWorkspaceExists is returned when creating a workspace that already exists +type ErrWorkspaceExists struct { + unwrapper + + Name string +} + +func (err *ErrWorkspaceExists) Error() string { + return fmt.Sprintf("workspace %q already exists", err.Name) +} + +type ErrNoInit struct { + unwrapper + + stderr string +} + +func (e *ErrNoInit) Error() string { + return e.stderr +} + +type ErrNoConfig struct { + unwrapper + + stderr string +} + +func (e *ErrNoConfig) Error() string { + return e.stderr +} + +// 
ErrCLIUsage is returned when the combination of flags or arguments is incorrect. +// +// CLI indicates usage errors in three different ways: either +// 1. Exit 1, with a custom error message on stderr. +// 2. Exit 1, with command usage logged to stderr. +// 3. Exit 127, with command usage logged to stdout. +// Currently cases 1 and 2 are handled. +// TODO KEM: Handle exit 127 case. How does this work on non-Unix platforms? +type ErrCLIUsage struct { + unwrapper + + stderr string +} + +func (e *ErrCLIUsage) Error() string { + return e.stderr +} + +// ErrTFVersionMismatch is returned when the running Terraform version is not compatible with the +// value specified for required_version in the terraform block. +type ErrTFVersionMismatch struct { + unwrapper + + TFVersion string + + // Constraint is not returned in the error messaging on 0.12 + Constraint string +} + +func (e *ErrTFVersionMismatch) Error() string { + return "terraform core version not supported by configuration" +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go new file mode 100644 index 00000000..10f6cb4c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go @@ -0,0 +1,160 @@ +package tfexec + +import ( + "bytes" + "context" + "fmt" + "io" + "os/exec" + "path/filepath" + "strings" +) + +type formatConfig struct { + recursive bool + dir string +} + +var defaultFormatConfig = formatConfig{ + recursive: false, +} + +type FormatOption interface { + configureFormat(*formatConfig) +} + +func (opt *RecursiveOption) configureFormat(conf *formatConfig) { + conf.recursive = opt.recursive +} + +func (opt *DirOption) configureFormat(conf *formatConfig) { + conf.dir = opt.path +} + +// FormatString formats a passed string, given a path to Terraform. +func FormatString(ctx context.Context, execPath string, content string) (string, error) { + tf, err := NewTerraform(filepath.Dir(execPath), execPath) + if err != nil { + return "", err + } + + return tf.FormatString(ctx, content) +} + +// FormatString formats a passed string. +func (tf *Terraform) FormatString(ctx context.Context, content string) (string, error) { + in := strings.NewReader(content) + var outBuf bytes.Buffer + err := tf.Format(ctx, in, &outBuf) + if err != nil { + return "", err + } + return outBuf.String(), nil +} + +// Format performs formatting on the unformatted io.Reader (as stdin to the CLI) and returns +// the formatted result on the formatted io.Writer. +func (tf *Terraform) Format(ctx context.Context, unformatted io.Reader, formatted io.Writer) error { + cmd, err := tf.formatCmd(ctx, nil, Dir("-")) + if err != nil { + return err + } + + cmd.Stdin = unformatted + cmd.Stdout = mergeWriters(cmd.Stdout, formatted) + + return tf.runTerraformCmd(ctx, cmd) +} + +// FormatWrite attempts to format and modify all config files in the working or selected (via DirOption) directory. +func (tf *Terraform) FormatWrite(ctx context.Context, opts ...FormatOption) error { + for _, o := range opts { + switch o := o.(type) { + case *DirOption: + if o.path == "-" { + return fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString") + } + } + } + + cmd, err := tf.formatCmd(ctx, []string{"-write=true", "-list=false", "-diff=false"}, opts...) + if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + +// FormatCheck returns true if the config files in the working or selected (via DirOption) directory are already formatted. 
+func (tf *Terraform) FormatCheck(ctx context.Context, opts ...FormatOption) (bool, []string, error) { + for _, o := range opts { + switch o := o.(type) { + case *DirOption: + if o.path == "-" { + return false, nil, fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString") + } + } + } + + cmd, err := tf.formatCmd(ctx, []string{"-write=false", "-list=true", "-diff=false", "-check=true"}, opts...) + if err != nil { + return false, nil, err + } + + var outBuf bytes.Buffer + cmd.Stdout = mergeWriters(cmd.Stdout, &outBuf) + + err = tf.runTerraformCmd(ctx, cmd) + if err == nil { + return true, nil, nil + } + if cmd.ProcessState.ExitCode() == 3 { + // unformatted, parse the file list + + files := []string{} + lines := strings.Split(strings.Replace(outBuf.String(), "\r\n", "\n", -1), "\n") + for _, l := range lines { + l = strings.TrimSpace(l) + if l == "" { + continue + } + files = append(files, l) + } + + return false, files, nil + } + return false, nil, err +} + +func (tf *Terraform) formatCmd(ctx context.Context, args []string, opts ...FormatOption) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_7_7, nil) + if err != nil { + return nil, fmt.Errorf("fmt was first introduced in Terraform 0.7.7: %w", err) + } + + c := defaultFormatConfig + + for _, o := range opts { + switch o.(type) { + case *RecursiveOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-recursive was added to fmt in Terraform 0.12: %w", err) + } + } + + o.configureFormat(&c) + } + + args = append([]string{"fmt", "-no-color"}, args...) + + if c.recursive { + args = append(args, "-recursive") + } + + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go new file mode 100644 index 00000000..e243d728 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go @@ -0,0 +1,141 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type importConfig struct { + addr string + id string + backup string + config string + allowMissingConfig bool + lock bool + lockTimeout string + reattachInfo ReattachInfo + state string + stateOut string + vars []string + varFiles []string +} + +var defaultImportOptions = importConfig{ + allowMissingConfig: false, + lock: true, + lockTimeout: "0s", +} + +// ImportOption represents options used in the Import method. 
+type ImportOption interface { + configureImport(*importConfig) +} + +func (opt *BackupOption) configureImport(conf *importConfig) { + conf.backup = opt.path +} + +func (opt *ConfigOption) configureImport(conf *importConfig) { + conf.config = opt.path +} + +func (opt *AllowMissingConfigOption) configureImport(conf *importConfig) { + conf.allowMissingConfig = opt.allowMissingConfig +} + +func (opt *LockOption) configureImport(conf *importConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureImport(conf *importConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ReattachOption) configureImport(conf *importConfig) { + conf.reattachInfo = opt.info +} + +func (opt *StateOption) configureImport(conf *importConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureImport(conf *importConfig) { + conf.stateOut = opt.path +} + +func (opt *VarOption) configureImport(conf *importConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *VarFileOption) configureImport(conf *importConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +// Import represents the terraform import subcommand. +func (tf *Terraform) Import(ctx context.Context, address, id string, opts ...ImportOption) error { + cmd, err := tf.importCmd(ctx, address, id, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) importCmd(ctx context.Context, address, id string, opts ...ImportOption) (*exec.Cmd, error) { + c := defaultImportOptions + + for _, o := range opts { + o.configureImport(&c) + } + + args := []string{"import", "-no-color", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.config != "" { + args = append(args, "-config="+c.config) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.allowMissingConfig { + args = append(args, "-allow-missing-config") + } + + // string slice opts: split into separate args + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // required args, always pass + args = append(args, address, id) + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go new file mode 100644 index 00000000..bff9ecd3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go @@ -0,0 +1,179 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type initConfig struct { + backend bool + backendConfig []string + dir string + forceCopy bool + fromModule string + get bool + getPlugins bool + lock bool + lockTimeout string + pluginDir []string + reattachInfo ReattachInfo + reconfigure bool + upgrade bool + verifyPlugins bool +} + +var defaultInitOptions = initConfig{ + backend: true, + forceCopy: false, + get: true, + 
getPlugins: true, + lock: true, + lockTimeout: "0s", + reconfigure: false, + upgrade: false, + verifyPlugins: true, +} + +// InitOption represents options used in the Init method. +type InitOption interface { + configureInit(*initConfig) +} + +func (opt *BackendOption) configureInit(conf *initConfig) { + conf.backend = opt.backend +} + +func (opt *BackendConfigOption) configureInit(conf *initConfig) { + conf.backendConfig = append(conf.backendConfig, opt.path) +} + +func (opt *DirOption) configureInit(conf *initConfig) { + conf.dir = opt.path +} + +func (opt *FromModuleOption) configureInit(conf *initConfig) { + conf.fromModule = opt.source +} + +func (opt *GetOption) configureInit(conf *initConfig) { + conf.get = opt.get +} + +func (opt *GetPluginsOption) configureInit(conf *initConfig) { + conf.getPlugins = opt.getPlugins +} + +func (opt *LockOption) configureInit(conf *initConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureInit(conf *initConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *PluginDirOption) configureInit(conf *initConfig) { + conf.pluginDir = append(conf.pluginDir, opt.pluginDir) +} + +func (opt *ReattachOption) configureInit(conf *initConfig) { + conf.reattachInfo = opt.info +} + +func (opt *ReconfigureOption) configureInit(conf *initConfig) { + conf.reconfigure = opt.reconfigure +} + +func (opt *UpgradeOption) configureInit(conf *initConfig) { + conf.upgrade = opt.upgrade +} + +func (opt *VerifyPluginsOption) configureInit(conf *initConfig) { + conf.verifyPlugins = opt.verifyPlugins +} + +// Init represents the terraform init subcommand. +func (tf *Terraform) Init(ctx context.Context, opts ...InitOption) error { + cmd, err := tf.initCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd, error) { + c := defaultInitOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption, *VerifyPluginsOption, *GetPluginsOption: + err := tf.compatible(ctx, nil, tf0_15_0) + if err != nil { + return nil, fmt.Errorf("-lock, -lock-timeout, -verify-plugins, and -get-plugins options are no longer available as of Terraform 0.15: %w", err) + } + } + + o.configureInit(&c) + } + + args := []string{"init", "-no-color", "-force-copy", "-input=false"} + + // string opts: only pass if set + if c.fromModule != "" { + args = append(args, "-from-module="+c.fromModule) + } + + // string opts removed in 0.15: pass if set and <0.15 + err := tf.compatible(ctx, nil, tf0_15_0) + if err == nil { + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + } + + // boolean opts: always pass + args = append(args, "-backend="+fmt.Sprint(c.backend)) + args = append(args, "-get="+fmt.Sprint(c.get)) + args = append(args, "-upgrade="+fmt.Sprint(c.upgrade)) + + // boolean opts removed in 0.15: pass if <0.15 + err = tf.compatible(ctx, nil, tf0_15_0) + if err == nil { + args = append(args, "-lock="+fmt.Sprint(c.lock)) + args = append(args, "-get-plugins="+fmt.Sprint(c.getPlugins)) + args = append(args, "-verify-plugins="+fmt.Sprint(c.verifyPlugins)) + } + + // unary flags: pass if true + if c.reconfigure { + args = append(args, "-reconfigure") + } + + // string slice opts: split into separate args + if c.backendConfig != nil { + for _, bc := range c.backendConfig { + args = append(args, "-backend-config="+bc) + } + } + if c.pluginDir != nil { + for _, pd := range c.pluginDir { + args = append(args, 
"-plugin-dir="+pd) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go new file mode 100644 index 00000000..6d20869b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go @@ -0,0 +1,322 @@ +package tfexec + +import ( + "encoding/json" +) + +// AllowMissingConfigOption represents the -allow-missing-config flag. +type AllowMissingConfigOption struct { + allowMissingConfig bool +} + +// AllowMissingConfig represents the -allow-missing-config flag. +func AllowMissingConfig(allowMissingConfig bool) *AllowMissingConfigOption { + return &AllowMissingConfigOption{allowMissingConfig} +} + +// BackendOption represents the -backend flag. +type BackendOption struct { + backend bool +} + +// Backend represents the -backend flag. +func Backend(backend bool) *BackendOption { + return &BackendOption{backend} +} + +// BackendConfigOption represents the -backend-config flag. +type BackendConfigOption struct { + path string +} + +// BackendConfig represents the -backend-config flag. +func BackendConfig(backendConfig string) *BackendConfigOption { + return &BackendConfigOption{backendConfig} +} + +type BackupOutOption struct { + path string +} + +// BackupOutOption represents the -backup-out flag. +func BackupOut(path string) *BackupOutOption { + return &BackupOutOption{path} +} + +// BackupOption represents the -backup flag. +type BackupOption struct { + path string +} + +// Backup represents the -backup flag. +func Backup(path string) *BackupOption { + return &BackupOption{path} +} + +// DisableBackup is a convenience method for Backup("-"), indicating backup state should be disabled. +func DisableBackup() *BackupOption { + return &BackupOption{"-"} +} + +// ConfigOption represents the -config flag. +type ConfigOption struct { + path string +} + +// Config represents the -config flag. +func Config(path string) *ConfigOption { + return &ConfigOption{path} +} + +// CopyStateOption represents the -state flag for terraform workspace new. This flag is used +// to copy an existing state file in to the new workspace. +type CopyStateOption struct { + path string +} + +// CopyState represents the -state flag for terraform workspace new. This flag is used +// to copy an existing state file in to the new workspace. +func CopyState(path string) *CopyStateOption { + return &CopyStateOption{path} +} + +type DirOption struct { + path string +} + +func Dir(path string) *DirOption { + return &DirOption{path} +} + +type DirOrPlanOption struct { + path string +} + +func DirOrPlan(path string) *DirOrPlanOption { + return &DirOrPlanOption{path} +} + +// DestroyFlagOption represents the -destroy flag. +type DestroyFlagOption struct { + // named to prevent conflict with DestroyOption interface + + destroy bool +} + +// Destroy represents the -destroy flag. +func Destroy(destroy bool) *DestroyFlagOption { + return &DestroyFlagOption{destroy} +} + +type DryRunOption struct { + dryRun bool +} + +// DryRun represents the -dry-run flag. 
+func DryRun(dryRun bool) *DryRunOption { + return &DryRunOption{dryRun} +} + +type ForceOption struct { + force bool +} + +func Force(force bool) *ForceOption { + return &ForceOption{force} +} + +type ForceCopyOption struct { + forceCopy bool +} + +func ForceCopy(forceCopy bool) *ForceCopyOption { + return &ForceCopyOption{forceCopy} +} + +type FromModuleOption struct { + source string +} + +func FromModule(source string) *FromModuleOption { + return &FromModuleOption{source} +} + +type GetOption struct { + get bool +} + +func Get(get bool) *GetOption { + return &GetOption{get} +} + +type GetPluginsOption struct { + getPlugins bool +} + +func GetPlugins(getPlugins bool) *GetPluginsOption { + return &GetPluginsOption{getPlugins} +} + +// LockOption represents the -lock flag. +type LockOption struct { + lock bool +} + +// Lock represents the -lock flag. +func Lock(lock bool) *LockOption { + return &LockOption{lock} +} + +// LockTimeoutOption represents the -lock-timeout flag. +type LockTimeoutOption struct { + timeout string +} + +// LockTimeout represents the -lock-timeout flag. +func LockTimeout(lockTimeout string) *LockTimeoutOption { + // TODO: should this just use a duration instead? + return &LockTimeoutOption{lockTimeout} +} + +type OutOption struct { + path string +} + +func Out(path string) *OutOption { + return &OutOption{path} +} + +type ParallelismOption struct { + parallelism int +} + +func Parallelism(n int) *ParallelismOption { + return &ParallelismOption{n} +} + +type PluginDirOption struct { + pluginDir string +} + +func PluginDir(pluginDir string) *PluginDirOption { + return &PluginDirOption{pluginDir} +} + +type ReattachInfo map[string]ReattachConfig + +// ReattachConfig holds the information Terraform needs to be able to attach +// itself to a provider process, so it can drive the process. +type ReattachConfig struct { + Protocol string + Pid int + Test bool + Addr ReattachConfigAddr +} + +// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. +type ReattachConfigAddr struct { + Network string + String string +} + +type ReattachOption struct { + info ReattachInfo +} + +func (info ReattachInfo) marshalString() (string, error) { + reattachStr, err := json.Marshal(info) + if err != nil { + return "", err + } + return string(reattachStr), nil +} + +func Reattach(info ReattachInfo) *ReattachOption { + return &ReattachOption{info} +} + +type ReconfigureOption struct { + reconfigure bool +} + +func Reconfigure(reconfigure bool) *ReconfigureOption { + return &ReconfigureOption{reconfigure} +} + +type RecursiveOption struct { + recursive bool +} + +func Recursive(r bool) *RecursiveOption { + return &RecursiveOption{r} +} + +type RefreshOption struct { + refresh bool +} + +func Refresh(refresh bool) *RefreshOption { + return &RefreshOption{refresh} +} + +type StateOption struct { + path string +} + +// State represents the -state flag. +// +// Deprecated: The -state CLI flag is a legacy flag and should not be used. +// If you need a different state file for every run, you can instead use the +// local backend. 
+// See https://github.com/hashicorp/terraform/issues/25920#issuecomment-676560799 +func State(path string) *StateOption { + return &StateOption{path} +} + +type StateOutOption struct { + path string +} + +func StateOut(path string) *StateOutOption { + return &StateOutOption{path} +} + +type TargetOption struct { + target string +} + +func Target(resource string) *TargetOption { + return &TargetOption{resource} +} + +type UpgradeOption struct { + upgrade bool +} + +func Upgrade(upgrade bool) *UpgradeOption { + return &UpgradeOption{upgrade} +} + +type VarOption struct { + assignment string +} + +func Var(assignment string) *VarOption { + return &VarOption{assignment} +} + +type VarFileOption struct { + path string +} + +func VarFile(path string) *VarFileOption { + return &VarFileOption{path} +} + +type VerifyPluginsOption struct { + verifyPlugins bool +} + +func VerifyPlugins(verifyPlugins bool) *VerifyPluginsOption { + return &VerifyPluginsOption{verifyPlugins} +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go new file mode 100644 index 00000000..b16b8b72 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go @@ -0,0 +1,63 @@ +package tfexec + +import ( + "context" + "encoding/json" + "os/exec" +) + +type outputConfig struct { + state string + json bool +} + +var defaultOutputOptions = outputConfig{} + +// OutputOption represents options used in the Output method. +type OutputOption interface { + configureOutput(*outputConfig) +} + +func (opt *StateOption) configureOutput(conf *outputConfig) { + conf.state = opt.path +} + +// OutputMeta represents the JSON output of 'terraform output -json', +// which resembles state format version 3 due to a historical accident. +// Please see hashicorp/terraform/command/output.go. +// TODO KEM: Should this type be in terraform-json? +type OutputMeta struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type"` + Value json.RawMessage `json:"value"` +} + +// Output represents the terraform output subcommand. +func (tf *Terraform) Output(ctx context.Context, opts ...OutputOption) (map[string]OutputMeta, error) { + outputCmd := tf.outputCmd(ctx, opts...) + + outputs := map[string]OutputMeta{} + err := tf.runTerraformCmdJSON(ctx, outputCmd, &outputs) + if err != nil { + return nil, err + } + + return outputs, nil +} + +func (tf *Terraform) outputCmd(ctx context.Context, opts ...OutputOption) *exec.Cmd { + c := defaultOutputOptions + + for _, o := range opts { + o.configureOutput(&c) + } + + args := []string{"output", "-no-color", "-json"} + + // string opts: only pass if set + if c.state != "" { + args = append(args, "-state="+c.state) + } + + return tf.buildTerraformCmd(ctx, nil, args...) 
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go new file mode 100644 index 00000000..bfe77db7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go @@ -0,0 +1,166 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type planConfig struct { + destroy bool + dir string + lock bool + lockTimeout string + out string + parallelism int + reattachInfo ReattachInfo + refresh bool + state string + targets []string + vars []string + varFiles []string +} + +var defaultPlanOptions = planConfig{ + destroy: false, + lock: true, + lockTimeout: "0s", + parallelism: 10, + refresh: true, +} + +// PlanOption represents options used in the Plan method. +type PlanOption interface { + configurePlan(*planConfig) +} + +func (opt *DirOption) configurePlan(conf *planConfig) { + conf.dir = opt.path +} + +func (opt *VarFileOption) configurePlan(conf *planConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +func (opt *VarOption) configurePlan(conf *planConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *TargetOption) configurePlan(conf *planConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *StateOption) configurePlan(conf *planConfig) { + conf.state = opt.path +} + +func (opt *ReattachOption) configurePlan(conf *planConfig) { + conf.reattachInfo = opt.info +} + +func (opt *RefreshOption) configurePlan(conf *planConfig) { + conf.refresh = opt.refresh +} + +func (opt *ParallelismOption) configurePlan(conf *planConfig) { + conf.parallelism = opt.parallelism +} + +func (opt *OutOption) configurePlan(conf *planConfig) { + conf.out = opt.path +} + +func (opt *LockTimeoutOption) configurePlan(conf *planConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *LockOption) configurePlan(conf *planConfig) { + conf.lock = opt.lock +} + +func (opt *DestroyFlagOption) configurePlan(conf *planConfig) { + conf.destroy = opt.destroy +} + +// Plan executes `terraform plan` with the specified options and waits for it +// to complete. +// +// The returned boolean is false when the plan diff is empty (no changes) and +// true when the plan diff is non-empty (changes present). +// +// The returned error is nil if `terraform plan` has been executed and exits +// with either 0 or 2. +func (tf *Terraform) Plan(ctx context.Context, opts ...PlanOption) (bool, error) { + cmd, err := tf.planCmd(ctx, opts...) 
+ if err != nil { + return false, err + } + err = tf.runTerraformCmd(ctx, cmd) + if err != nil && cmd.ProcessState.ExitCode() == 2 { + return true, nil + } + return false, err +} + +func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) { + c := defaultPlanOptions + + for _, o := range opts { + o.configurePlan(&c) + } + + args := []string{"plan", "-no-color", "-input=false", "-detailed-exitcode"} + + // string opts: only pass if set + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.out != "" { + args = append(args, "-out="+c.out) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) + args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + + // unary flags: pass if true + if c.destroy { + args = append(args, "-destroy") + } + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go new file mode 100644 index 00000000..52efc5db --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go @@ -0,0 +1,33 @@ +package tfexec + +import ( + "context" + "os/exec" + + tfjson "github.com/hashicorp/terraform-json" +) + +// ProvidersSchema represents the terraform providers schema -json subcommand. +func (tf *Terraform) ProvidersSchema(ctx context.Context) (*tfjson.ProviderSchemas, error) { + schemaCmd := tf.providersSchemaCmd(ctx) + + var ret tfjson.ProviderSchemas + err := tf.runTerraformCmdJSON(ctx, schemaCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil +} + +func (tf *Terraform) providersSchemaCmd(ctx context.Context, args ...string) *exec.Cmd { + allArgs := []string{"providers", "schema", "-json", "-no-color"} + allArgs = append(allArgs, args...) + + return tf.buildTerraformCmd(ctx, nil, allArgs...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go new file mode 100644 index 00000000..78f6b4b5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go @@ -0,0 +1,137 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type refreshConfig struct { + backup string + dir string + lock bool + lockTimeout string + reattachInfo ReattachInfo + state string + stateOut string + targets []string + vars []string + varFiles []string +} + +var defaultRefreshOptions = refreshConfig{ + lock: true, + lockTimeout: "0s", +} + +// RefreshCmdOption represents options used in the Refresh method. 
+type RefreshCmdOption interface { + configureRefresh(*refreshConfig) +} + +func (opt *BackupOption) configureRefresh(conf *refreshConfig) { + conf.backup = opt.path +} + +func (opt *DirOption) configureRefresh(conf *refreshConfig) { + conf.dir = opt.path +} + +func (opt *LockOption) configureRefresh(conf *refreshConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureRefresh(conf *refreshConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ReattachOption) configureRefresh(conf *refreshConfig) { + conf.reattachInfo = opt.info +} + +func (opt *StateOption) configureRefresh(conf *refreshConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureRefresh(conf *refreshConfig) { + conf.stateOut = opt.path +} + +func (opt *TargetOption) configureRefresh(conf *refreshConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *VarOption) configureRefresh(conf *refreshConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *VarFileOption) configureRefresh(conf *refreshConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +// Refresh represents the terraform refresh subcommand. +func (tf *Terraform) Refresh(ctx context.Context, opts ...RefreshCmdOption) error { + cmd, err := tf.refreshCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { + c := defaultRefreshOptions + + for _, o := range opts { + o.configureRefresh(&c) + } + + args := []string{"refresh", "-no-color", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go new file mode 100644 index 00000000..a8d67f1a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go @@ -0,0 +1,196 @@ +package tfexec + +import ( + "bytes" + "context" + "fmt" + "os/exec" + + tfjson "github.com/hashicorp/terraform-json" +) + +type showConfig struct { + reattachInfo ReattachInfo +} + +var defaultShowOptions = showConfig{} + +type ShowOption interface { + configureShow(*showConfig) +} + +func (opt *ReattachOption) configureShow(conf *showConfig) { + conf.reattachInfo = opt.info +} + +// Show reads the default state path and outputs the state. +// To read a state or plan file, ShowState or ShowPlan must be used instead. 
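+//
+// A minimal usage sketch, assuming tf was built with NewTerraform and the
+// working directory has already been initialized (for example with this
+// package's Init method):
+//
+//	state, err := tf.Show(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(state.FormatVersion, state.TerraformVersion)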
+func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.State, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err) + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, true, mergeEnv) + + var ret tfjson.State + ret.UseJSONNumber(true) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil +} + +// ShowStateFile reads a given state file and outputs the state. +func (tf *Terraform) ShowStateFile(ctx context.Context, statePath string, opts ...ShowOption) (*tfjson.State, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err) + } + + if statePath == "" { + return nil, fmt.Errorf("statePath cannot be blank: use Show() if not passing statePath") + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, true, mergeEnv, statePath) + + var ret tfjson.State + ret.UseJSONNumber(true) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil +} + +// ShowPlanFile reads a given plan file and outputs the plan. +func (tf *Terraform) ShowPlanFile(ctx context.Context, planPath string, opts ...ShowOption) (*tfjson.Plan, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err) + } + + if planPath == "" { + return nil, fmt.Errorf("planPath cannot be blank: use Show() if not passing planPath") + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, true, mergeEnv, planPath) + + var ret tfjson.Plan + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil + +} + +// ShowPlanFileRaw reads a given plan file and outputs the plan in a +// human-friendly, opaque format. 
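+//
+// A usage sketch, assuming "plan.tfplan" was written earlier by Plan with an
+// -out option (the file name is a placeholder):
+//
+//	human, err := tf.ShowPlanFileRaw(context.Background(), "plan.tfplan")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Print(human)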
+func (tf *Terraform) ShowPlanFileRaw(ctx context.Context, planPath string, opts ...ShowOption) (string, error) { + if planPath == "" { + return "", fmt.Errorf("planPath cannot be blank: use Show() if not passing planPath") + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return "", err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, false, mergeEnv, planPath) + + var ret bytes.Buffer + showCmd.Stdout = &ret + err := tf.runTerraformCmd(ctx, showCmd) + if err != nil { + return "", err + } + + return ret.String(), nil + +} + +func (tf *Terraform) showCmd(ctx context.Context, jsonOutput bool, mergeEnv map[string]string, args ...string) *exec.Cmd { + allArgs := []string{"show"} + if jsonOutput { + allArgs = append(allArgs, "-json") + } + allArgs = append(allArgs, "-no-color") + allArgs = append(allArgs, args...) + + return tf.buildTerraformCmd(ctx, mergeEnv, allArgs...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go new file mode 100644 index 00000000..fc7eecf8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go @@ -0,0 +1,105 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type stateMvConfig struct { + backup string + backupOut string + dryRun bool + lock bool + lockTimeout string + state string + stateOut string +} + +var defaultStateMvOptions = stateMvConfig{ + lock: true, + lockTimeout: "0s", +} + +// StateMvCmdOption represents options used in the Refresh method. +type StateMvCmdOption interface { + configureStateMv(*stateMvConfig) +} + +func (opt *BackupOption) configureStateMv(conf *stateMvConfig) { + conf.backup = opt.path +} + +func (opt *BackupOutOption) configureStateMv(conf *stateMvConfig) { + conf.backupOut = opt.path +} + +func (opt *DryRunOption) configureStateMv(conf *stateMvConfig) { + conf.dryRun = opt.dryRun +} + +func (opt *LockOption) configureStateMv(conf *stateMvConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStateMv(conf *stateMvConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureStateMv(conf *stateMvConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureStateMv(conf *stateMvConfig) { + conf.stateOut = opt.path +} + +// StateMv represents the terraform state mv subcommand. +func (tf *Terraform) StateMv(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) error { + cmd, err := tf.stateMvCmd(ctx, source, destination, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) stateMvCmd(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) (*exec.Cmd, error) { + c := defaultStateMvOptions + + for _, o := range opts { + o.configureStateMv(&c) + } + + args := []string{"state", "mv", "-no-color"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.backupOut != "" { + args = append(args, "-backup-out="+c.backupOut) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.dryRun { + args = append(args, "-dry-run") + } + + // positional arguments + args = append(args, source) + args = append(args, destination) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go new file mode 100644 index 00000000..0c5dd666 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go @@ -0,0 +1,104 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type stateRmConfig struct { + backup string + backupOut string + dryRun bool + lock bool + lockTimeout string + state string + stateOut string +} + +var defaultStateRmOptions = stateRmConfig{ + lock: true, + lockTimeout: "0s", +} + +// StateRmCmdOption represents options used in the Refresh method. +type StateRmCmdOption interface { + configureStateRm(*stateRmConfig) +} + +func (opt *BackupOption) configureStateRm(conf *stateRmConfig) { + conf.backup = opt.path +} + +func (opt *BackupOutOption) configureStateRm(conf *stateRmConfig) { + conf.backupOut = opt.path +} + +func (opt *DryRunOption) configureStateRm(conf *stateRmConfig) { + conf.dryRun = opt.dryRun +} + +func (opt *LockOption) configureStateRm(conf *stateRmConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStateRm(conf *stateRmConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureStateRm(conf *stateRmConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureStateRm(conf *stateRmConfig) { + conf.stateOut = opt.path +} + +// StateRm represents the terraform state rm subcommand. +func (tf *Terraform) StateRm(ctx context.Context, address string, opts ...StateRmCmdOption) error { + cmd, err := tf.stateRmCmd(ctx, address, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) stateRmCmd(ctx context.Context, address string, opts ...StateRmCmdOption) (*exec.Cmd, error) { + c := defaultStateRmOptions + + for _, o := range opts { + o.configureStateRm(&c) + } + + args := []string{"state", "rm", "-no-color"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.backupOut != "" { + args = append(args, "-backup-out="+c.backupOut) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.dryRun { + args = append(args, "-dry-run") + } + + // positional arguments + args = append(args, address) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go new file mode 100644 index 00000000..74787af0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go @@ -0,0 +1,165 @@ +package tfexec + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "sync" + + "github.com/hashicorp/go-version" +) + +type printfer interface { + Printf(format string, v ...interface{}) +} + +// Terraform represents the Terraform CLI executable and working directory. +// +// Typically this is constructed against the root module of a Terraform configuration +// but you can override paths used in some commands depending on the available +// options. +// +// All functions that execute CLI commands take a context.Context. It should be noted that +// exec.Cmd.Run will not return context.DeadlineExceeded or context.Canceled by default, we +// have augmented our wrapped errors to respond true to errors.Is for context.DeadlineExceeded +// and context.Canceled if those are present on the context when the error is parsed. See +// https://github.com/golang/go/issues/21880 for more about the Go limitations. +// +// By default, the instance inherits the environment from the calling code (using os.Environ) +// but it ignores certain environment variables that are managed within the code and prohibits +// setting them through SetEnv: +// +// - TF_APPEND_USER_AGENT +// - TF_IN_AUTOMATION +// - TF_INPUT +// - TF_LOG +// - TF_LOG_PATH +// - TF_REATTACH_PROVIDERS +// - TF_DISABLE_PLUGIN_TLS +// - TF_SKIP_PROVIDER_VERIFY +type Terraform struct { + execPath string + workingDir string + appendUserAgent string + disablePluginTLS bool + skipProviderVerify bool + env map[string]string + + stdout io.Writer + stderr io.Writer + logger printfer + logPath string + + versionLock sync.Mutex + execVersion *version.Version + provVersions map[string]*version.Version +} + +// NewTerraform returns a Terraform struct with default values for all fields. +// If a blank execPath is supplied, NewTerraform will attempt to locate an +// appropriate binary on the system PATH. 
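+//
+// A construction sketch, assuming the vendored tfinstall package is used to
+// locate or download a binary (the module path is a placeholder, and Init is
+// defined elsewhere in this package):
+//
+//	execPath, err := tfinstall.Find(ctx, tfinstall.LookPath(), tfinstall.LatestVersion("", false))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tf, err := tfexec.NewTerraform("/path/to/module", execPath)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := tf.Init(ctx); err != nil {
+//		log.Fatal(err)
+//	}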
+func NewTerraform(workingDir string, execPath string) (*Terraform, error) { + if workingDir == "" { + return nil, fmt.Errorf("Terraform cannot be initialised with empty workdir") + } + + if _, err := os.Stat(workingDir); err != nil { + return nil, fmt.Errorf("error initialising Terraform with workdir %s: %s", workingDir, err) + } + + if execPath == "" { + err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the tfinstall package.") + return nil, &ErrNoSuitableBinary{ + err: err, + } + } + tf := Terraform{ + execPath: execPath, + workingDir: workingDir, + env: nil, // explicit nil means copy os.Environ + logger: log.New(ioutil.Discard, "", 0), + } + + return &tf, nil +} + +// SetEnv allows you to override environment variables, this should not be used for any well known +// Terraform environment variables that are already covered in options. Pass nil to copy the values +// from os.Environ. Attempting to set environment variables that should be managed manually will +// result in ErrManualEnvVar being returned. +func (tf *Terraform) SetEnv(env map[string]string) error { + prohibited := ProhibitedEnv(env) + if len(prohibited) > 0 { + // just error on the first instance + return &ErrManualEnvVar{prohibited[0]} + } + + tf.env = env + return nil +} + +// SetLogger specifies a logger for tfexec to use. +func (tf *Terraform) SetLogger(logger printfer) { + tf.logger = logger +} + +// SetStdout specifies a writer to stream stdout to for every command. +// +// This should be used for information or logging purposes only, not control +// flow. Any parsing necessary should be added as functionality to this package. +func (tf *Terraform) SetStdout(w io.Writer) { + tf.stdout = w +} + +// SetStderr specifies a writer to stream stderr to for every command. +// +// This should be used for information or logging purposes only, not control +// flow. Any parsing necessary should be added as functionality to this package. +func (tf *Terraform) SetStderr(w io.Writer) { + tf.stderr = w +} + +// SetLogPath sets the TF_LOG_PATH environment variable for Terraform CLI +// execution. +func (tf *Terraform) SetLogPath(path string) error { + tf.logPath = path + return nil +} + +// SetAppendUserAgent sets the TF_APPEND_USER_AGENT environment variable for +// Terraform CLI execution. +func (tf *Terraform) SetAppendUserAgent(ua string) error { + tf.appendUserAgent = ua + return nil +} + +// SetDisablePluginTLS sets the TF_DISABLE_PLUGIN_TLS environment variable for +// Terraform CLI execution. +func (tf *Terraform) SetDisablePluginTLS(disabled bool) error { + tf.disablePluginTLS = disabled + return nil +} + +// SetSkipProviderVerify sets the TF_SKIP_PROVIDER_VERIFY environment variable +// for Terraform CLI execution. This is no longer used in 0.13.0 and greater. +func (tf *Terraform) SetSkipProviderVerify(skip bool) error { + err := tf.compatible(context.Background(), nil, tf0_13_0) + if err != nil { + return err + } + tf.skipProviderVerify = skip + return nil +} + +// WorkingDir returns the working directory for Terraform. +func (tf *Terraform) WorkingDir() string { + return tf.workingDir +} + +// ExecPath returns the path to the Terraform executable. 
+func (tf *Terraform) ExecPath() string { + return tf.execPath +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go new file mode 100644 index 00000000..e55237a7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go @@ -0,0 +1,80 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type upgrade012Config struct { + dir string + force bool + + reattachInfo ReattachInfo +} + +var defaultUpgrade012Options = upgrade012Config{ + force: false, +} + +// Upgrade012Option represents options used in the Destroy method. +type Upgrade012Option interface { + configureUpgrade012(*upgrade012Config) +} + +func (opt *DirOption) configureUpgrade012(conf *upgrade012Config) { + conf.dir = opt.path +} + +func (opt *ForceOption) configureUpgrade012(conf *upgrade012Config) { + conf.force = opt.force +} + +func (opt *ReattachOption) configureUpgrade012(conf *upgrade012Config) { + conf.reattachInfo = opt.info +} + +// Upgrade012 represents the terraform 0.12upgrade subcommand. +func (tf *Terraform) Upgrade012(ctx context.Context, opts ...Upgrade012Option) error { + cmd, err := tf.upgrade012Cmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) upgrade012Cmd(ctx context.Context, opts ...Upgrade012Option) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_12_0, tf0_13_0) + if err != nil { + return nil, fmt.Errorf("terraform 0.12upgrade is only supported in 0.12 releases: %w", err) + } + + c := defaultUpgrade012Options + + for _, o := range opts { + o.configureUpgrade012(&c) + } + + args := []string{"0.12upgrade", "-no-color", "-yes"} + + // boolean opts: only pass if set + if c.force { + args = append(args, "-force") + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go new file mode 100644 index 00000000..756eccd7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go @@ -0,0 +1,44 @@ +package tfexec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" +) + +// Validate represents the validate subcommand to the Terraform CLI. The -json +// flag support was added in 0.12.0, so this will not work on earlier versions. 
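+//
+// A usage sketch (requires Terraform 0.12.0 or newer, matching the version
+// check below):
+//
+//	out, err := tf.Validate(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if !out.Valid {
+//		for _, d := range out.Diagnostics {
+//			fmt.Printf("%s: %s\n", d.Severity, d.Summary)
+//		}
+//	}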
+func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform validate -json was added in 0.12.0: %w", err) + } + + cmd := tf.buildTerraformCmd(ctx, nil, "validate", "-no-color", "-json") + + var outbuf = bytes.Buffer{} + cmd.Stdout = &outbuf + + err = tf.runTerraformCmd(ctx, cmd) + // TODO: this command should not exit 1 if you pass -json as its hard to differentiate other errors + if err != nil && cmd.ProcessState.ExitCode() != 1 { + return nil, err + } + + var ret tfjson.ValidateOutput + // TODO: ret.UseJSONNumber(true) validate output should support JSON numbers + jsonErr := json.Unmarshal(outbuf.Bytes(), &ret) + if jsonErr != nil { + // the original call was possibly bad, if it has an error, actually just return that + if err != nil { + return nil, err + } + + return nil, jsonErr + } + + return &ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go new file mode 100644 index 00000000..9e05b056 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -0,0 +1,155 @@ +package tfexec + +import ( + "bytes" + "context" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/go-version" +) + +var ( + tf0_7_7 = version.Must(version.NewVersion("0.7.7")) + tf0_12_0 = version.Must(version.NewVersion("0.12.0")) + tf0_13_0 = version.Must(version.NewVersion("0.13.0")) + tf0_15_0 = version.Must(version.NewVersion("0.15.0")) +) + +// Version returns structured output from the terraform version command including both the Terraform CLI version +// and any initialized provider versions. This will read cached values when present unless the skipCache parameter +// is set to true. +func (tf *Terraform) Version(ctx context.Context, skipCache bool) (tfVersion *version.Version, providerVersions map[string]*version.Version, err error) { + tf.versionLock.Lock() + defer tf.versionLock.Unlock() + + if tf.execVersion == nil || skipCache { + tf.execVersion, tf.provVersions, err = tf.version(ctx) + if err != nil { + return nil, nil, err + } + } + + return tf.execVersion, tf.provVersions, nil +} + +// version does not use the locking on the Terraform instance and should probably not be used directly, prefer Version. +func (tf *Terraform) version(ctx context.Context) (*version.Version, map[string]*version.Version, error) { + // TODO: 0.13.0-beta2? and above supports a `-json` on the version command, should add support + // for that here and fallback to string parsing + + versionCmd := tf.buildTerraformCmd(ctx, nil, "version") + + var outBuf bytes.Buffer + versionCmd.Stdout = &outBuf + + err := tf.runTerraformCmd(ctx, versionCmd) + if err != nil { + return nil, nil, err + } + + tfVersion, providerVersions, err := parseVersionOutput(outBuf.String()) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse version: %w", err) + } + + return tfVersion, providerVersions, nil +} + +var ( + simpleVersionRe = `v?(?P[0-9]+(?:\.[0-9]+)*(?:-[A-Za-z0-9\.]+)?)` + + versionOutputRe = regexp.MustCompile(`Terraform ` + simpleVersionRe) + providerVersionOutputRe = regexp.MustCompile(`(\n\+ provider[\. 
](?P\S+) ` + simpleVersionRe + `)`) +) + +func parseVersionOutput(stdout string) (*version.Version, map[string]*version.Version, error) { + stdout = strings.TrimSpace(stdout) + + submatches := versionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + allSubmatches := providerVersionOutputRe.FindAllStringSubmatch(stdout, -1) + provV := map[string]*version.Version{} + + for _, submatches := range allSubmatches { + if len(submatches) != 4 { + return nil, nil, fmt.Errorf("unexpected number of provider version matches %d for %s", len(submatches), stdout) + } + + v, err := version.NewVersion(submatches[3]) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse provider version %q: %w", submatches[3], err) + } + + provV[submatches[2]] = v + } + + return v, provV, err +} + +func errorVersionString(v *version.Version) string { + if v == nil { + return "-" + } + return v.String() +} + +// compatible asserts compatibility of the cached terraform version with the executable, and returns a well known error if not. +func (tf *Terraform) compatible(ctx context.Context, minInclusive *version.Version, maxExclusive *version.Version) error { + tfv, _, err := tf.Version(ctx, false) + if err != nil { + return err + } + if ok := versionInRange(tfv, minInclusive, maxExclusive); !ok { + return &ErrVersionMismatch{ + MinInclusive: errorVersionString(minInclusive), + MaxExclusive: errorVersionString(maxExclusive), + Actual: errorVersionString(tfv), + } + } + + return nil +} + +func stripPrereleaseAndMeta(v *version.Version) *version.Version { + if v == nil { + return nil + } + segs := []string{} + for _, s := range v.Segments() { + segs = append(segs, strconv.Itoa(s)) + } + vs := strings.Join(segs, ".") + clean, _ := version.NewVersion(vs) + return clean +} + +// versionInRange checks compatibility of the Terraform version. The minimum is inclusive and the max +// is exclusive, equivalent to min <= expected version < max. +// +// Pre-release information is ignored for comparison. +func versionInRange(tfv *version.Version, minInclusive *version.Version, maxExclusive *version.Version) bool { + if minInclusive == nil && maxExclusive == nil { + return true + } + tfv = stripPrereleaseAndMeta(tfv) + minInclusive = stripPrereleaseAndMeta(minInclusive) + maxExclusive = stripPrereleaseAndMeta(maxExclusive) + if minInclusive != nil && !tfv.GreaterThanOrEqual(minInclusive) { + return false + } + if maxExclusive != nil && !tfv.LessThan(maxExclusive) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go new file mode 100644 index 00000000..b8d03094 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go @@ -0,0 +1,47 @@ +package tfexec + +import ( + "bytes" + "context" + "strings" +) + +// WorkspaceList represents the workspace list subcommand to the Terraform CLI. 
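+//
+// A usage sketch:
+//
+//	workspaces, current, err := tf.WorkspaceList(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("current workspace %q (of %d total)\n", current, len(workspaces))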
+func (tf *Terraform) WorkspaceList(ctx context.Context) ([]string, string, error) { + // TODO: [DIR] param option + wlCmd := tf.buildTerraformCmd(ctx, nil, "workspace", "list", "-no-color") + + var outBuf bytes.Buffer + wlCmd.Stdout = &outBuf + + err := tf.runTerraformCmd(ctx, wlCmd) + if err != nil { + return nil, "", err + } + + ws, current := parseWorkspaceList(outBuf.String()) + + return ws, current, nil +} + +const currentWorkspacePrefix = "* " + +func parseWorkspaceList(stdout string) ([]string, string) { + lines := strings.Split(stdout, "\n") + + current := "" + workspaces := []string{} + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + if strings.HasPrefix(line, currentWorkspacePrefix) { + line = strings.TrimPrefix(line, currentWorkspacePrefix) + current = line + } + workspaces = append(workspaces, line) + } + + return workspaces, current +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go new file mode 100644 index 00000000..2e05ffdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go @@ -0,0 +1,83 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type workspaceNewConfig struct { + lock bool + lockTimeout string + copyState string +} + +var defaultWorkspaceNewOptions = workspaceNewConfig{ + lock: true, + lockTimeout: "0s", +} + +// WorkspaceNewCmdOption represents options that are applicable to the WorkspaceNew method. +type WorkspaceNewCmdOption interface { + configureWorkspaceNew(*workspaceNewConfig) +} + +func (opt *LockOption) configureWorkspaceNew(conf *workspaceNewConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureWorkspaceNew(conf *workspaceNewConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *CopyStateOption) configureWorkspaceNew(conf *workspaceNewConfig) { + conf.copyState = opt.path +} + +// WorkspaceNew represents the workspace new subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceNew(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) error { + cmd, err := tf.workspaceNewCmd(ctx, workspace, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) workspaceNewCmd(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) (*exec.Cmd, error) { + // TODO: [DIR] param option + + c := defaultWorkspaceNewOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace new in Terraform 0.12: %w", err) + } + } + + o.configureWorkspaceNew(&c) + } + + args := []string{"workspace", "new", "-no-color"} + + if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceNewOptions.lockTimeout { + // only pass if not default, so we don't need to worry about the 0.11 version check + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if !c.lock { + // only pass if false, so we don't need to worry about the 0.11 version check + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + } + if c.copyState != "" { + args = append(args, "-state="+c.copyState) + } + + args = append(args, workspace) + + cmd := tf.buildTerraformCmd(ctx, nil, args...) 
+ + return cmd, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go new file mode 100644 index 00000000..5a51330f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go @@ -0,0 +1,10 @@ +package tfexec + +import "context" + +// WorkspaceSelect represents the workspace select subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceSelect(ctx context.Context, workspace string) error { + // TODO: [DIR] param option + + return tf.runTerraformCmd(ctx, tf.buildTerraformCmd(ctx, nil, "workspace", "select", "-no-color", workspace)) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/doc.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/doc.go new file mode 100644 index 00000000..e48ea412 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/doc.go @@ -0,0 +1,4 @@ +// Package tfinstall offers multiple strategies for finding and/or installing +// a binary version of Terraform. Some of the strategies can also authenticate +// the source of the binary as an official HashiCorp release. +package tfinstall diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/download.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/download.go new file mode 100644 index 00000000..30e15e87 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/download.go @@ -0,0 +1,125 @@ +package tfinstall + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/go-getter" + "golang.org/x/crypto/openpgp" +) + +func ensureInstallDir(installDir string) (string, error) { + if installDir == "" { + return ioutil.TempDir("", "tfexec") + } + + if _, err := os.Stat(installDir); err != nil { + return "", fmt.Errorf("could not access directory %s for installing Terraform: %w", installDir, err) + } + + return installDir, nil +} + +func downloadWithVerification(ctx context.Context, tfVersion string, installDir string, appendUserAgent string) (string, error) { + osName := runtime.GOOS + archName := runtime.GOARCH + + // setup: ensure we have a place to put our downloaded terraform binary + tfDir, err := ensureInstallDir(installDir) + if err != nil { + return "", err + } + + httpGetter := &getter.HttpGetter{ + Netrc: true, + Client: newHTTPClient(appendUserAgent), + } + client := getter.Client{ + Ctx: ctx, + Getters: map[string]getter.Getter{ + "https": httpGetter, + }, + } + client.Mode = getter.ClientModeAny + + // firstly, download and verify the signature of the checksum file + + sumsTmpDir, err := ioutil.TempDir("", "tfinstall") + if err != nil { + return "", err + } + defer os.RemoveAll(sumsTmpDir) + + sumsFilename := "terraform_" + tfVersion + "_SHA256SUMS" + sumsSigFilename := sumsFilename + ".sig" + + sumsURL := fmt.Sprintf("%s/%s/%s", baseURL, tfVersion, sumsFilename) + sumsSigURL := fmt.Sprintf("%s/%s/%s", baseURL, tfVersion, sumsSigFilename) + + client.Src = sumsURL + client.Dst = sumsTmpDir + err = client.Get() + if err != nil { + return "", fmt.Errorf("error fetching checksums: %s", err) + } + + client.Src = sumsSigURL + err = client.Get() + if err != nil { + return "", fmt.Errorf("error fetching checksums signature: %s", err) + } + + sumsPath := filepath.Join(sumsTmpDir, sumsFilename) + sumsSigPath := filepath.Join(sumsTmpDir, sumsSigFilename) + + err = verifySumsSignature(sumsPath, sumsSigPath) + if err != nil { + return "", err + } + + // 
secondly, download Terraform itself, verifying the checksum + url := tfURL(tfVersion, osName, archName) + client.Src = url + client.Dst = tfDir + client.Mode = getter.ClientModeDir + err = client.Get() + if err != nil { + return "", err + } + + return filepath.Join(tfDir, "terraform"), nil +} + +// verifySumsSignature downloads SHA256SUMS and SHA256SUMS.sig and verifies +// the signature using the HashiCorp public key. +func verifySumsSignature(sumsPath, sumsSigPath string) error { + el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashicorpPublicKey)) + if err != nil { + return err + } + data, err := os.Open(sumsPath) + if err != nil { + return err + } + sig, err := os.Open(sumsSigPath) + if err != nil { + return err + } + _, err = openpgp.CheckDetachedSignature(el, data, sig) + + return err +} + +func tfURL(tfVersion, osName, archName string) string { + sumsFilename := "terraform_" + tfVersion + "_SHA256SUMS" + sumsURL := fmt.Sprintf("%s/%s/%s", baseURL, tfVersion, sumsFilename) + return fmt.Sprintf( + "%s/%s/terraform_%s_%s_%s.zip?checksum=file:%s", + baseURL, tfVersion, tfVersion, osName, archName, sumsURL, + ) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_path.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_path.go new file mode 100644 index 00000000..010cc501 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_path.go @@ -0,0 +1,27 @@ +package tfinstall + +import ( + "context" + "os" +) + +type ExactPathOption struct { + execPath string +} + +var _ ExecPathFinder = &ExactPathOption{} + +func ExactPath(execPath string) *ExactPathOption { + opt := &ExactPathOption{ + execPath: execPath, + } + return opt +} + +func (opt *ExactPathOption) ExecPath(context.Context) (string, error) { + if _, err := os.Stat(opt.execPath); err != nil { + // fall through to the next strategy if the local path does not exist + return "", nil + } + return opt.execPath, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_version.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_version.go new file mode 100644 index 00000000..afcb9aca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/exact_version.go @@ -0,0 +1,35 @@ +package tfinstall + +import ( + "context" + + "github.com/hashicorp/go-version" +) + +type ExactVersionOption struct { + tfVersion string + installDir string + + UserAgent string +} + +var _ ExecPathFinder = &ExactVersionOption{} + +func ExactVersion(tfVersion string, installDir string) *ExactVersionOption { + opt := &ExactVersionOption{ + tfVersion: tfVersion, + installDir: installDir, + } + + return opt +} + +func (opt *ExactVersionOption) ExecPath(ctx context.Context) (string, error) { + // validate version + _, err := version.NewVersion(opt.tfVersion) + if err != nil { + return "", err + } + + return downloadWithVerification(ctx, opt.tfVersion, opt.installDir, opt.UserAgent) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/http.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/http.go new file mode 100644 index 00000000..70d95a6e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/http.go @@ -0,0 +1,37 @@ +package tfinstall + +import ( + "fmt" + "net/http" + "os" + "strings" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + + intversion "github.com/hashicorp/terraform-exec/internal/version" +) + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt 
*userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + return rt.inner.RoundTrip(req) +} + +func newHTTPClient(appendUA string) *http.Client { + appendUA = strings.TrimSpace(appendUA + " " + os.Getenv("TF_APPEND_USER_AGENT")) + userAgent := strings.TrimSpace(fmt.Sprintf("HashiCorp-tfinstall/%s %s", intversion.ModuleVersion(), appendUA)) + + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: userAgent, + inner: cli.Transport, + } + + return cli +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/latest_version.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/latest_version.go new file mode 100644 index 00000000..f01735ca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/latest_version.go @@ -0,0 +1,51 @@ +package tfinstall + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-checkpoint" +) + +type LatestVersionOption struct { + forceCheckpoint bool + installDir string + + UserAgent string +} + +var _ ExecPathFinder = &LatestVersionOption{} + +func LatestVersion(installDir string, forceCheckpoint bool) *LatestVersionOption { + opt := &LatestVersionOption{ + forceCheckpoint: forceCheckpoint, + installDir: installDir, + } + + return opt +} + +func (opt *LatestVersionOption) ExecPath(ctx context.Context) (string, error) { + v, err := latestVersion(opt.forceCheckpoint) + if err != nil { + return "", err + } + + return downloadWithVerification(ctx, v, opt.installDir, opt.UserAgent) +} + +func latestVersion(forceCheckpoint bool) (string, error) { + resp, err := checkpoint.Check(&checkpoint.CheckParams{ + Product: "terraform", + Force: forceCheckpoint, + }) + if err != nil { + return "", err + } + + if resp.CurrentVersion == "" { + return "", fmt.Errorf("could not determine latest version of terraform using checkpoint: CHECKPOINT_DISABLE may be set") + } + + return resp.CurrentVersion, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/look_path.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/look_path.go new file mode 100644 index 00000000..2ebec09b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/look_path.go @@ -0,0 +1,30 @@ +package tfinstall + +import ( + "context" + "log" + "os/exec" +) + +type LookPathOption struct { +} + +var _ ExecPathFinder = &LookPathOption{} + +func LookPath() *LookPathOption { + opt := &LookPathOption{} + + return opt +} + +func (opt *LookPathOption) ExecPath(context.Context) (string, error) { + p, err := exec.LookPath("terraform") + if err != nil { + if notFoundErr, ok := err.(*exec.Error); ok && notFoundErr.Err == exec.ErrNotFound { + log.Printf("[WARN] could not locate a terraform executable on system path; continuing") + return "", nil + } + return "", err + } + return p, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go new file mode 100644 index 00000000..a14f88c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go @@ -0,0 +1,32 @@ +package tfinstall + +const hashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA 
+3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JAU4EEwEKADgWIQSRpuf4XQXG +VjC+8YlRhS2HNI/8TAUCXn0BIQIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRBRhS2HNI/8TJITCACT2Zu2l8Jo/YLQMs+iYsC3gn5qJE/qf60VWpOnP0LG24rj +k3j4ET5P2ow/o9lQNCM/fJrEB2CwhnlvbrLbNBbt2e35QVWvvxwFZwVcoBQXTXdT ++G2cKS2Snc0bhNF7jcPX1zau8gxLurxQBaRdoL38XQ41aKfdOjEico4ZxQYSrOoC +RbF6FODXj+ZL8CzJFa2Sd0rHAROHoF7WhKOvTrg1u8JvHrSgvLYGBHQZUV23cmXH +yvzITl5jFzORf9TUdSv8tnuAnNsOV4vOA6lj61Z3/0Vgor+ZByfiznonPHQtKYtY +kac1M/Dq2xZYiSf0tDFywgUDIF/IyS348wKmnDGjuQENBFMORM0BCADWj1GNOP4O +wJmJDjI2gmeok6fYQeUbI/+Hnv5Z/cAK80Tvft3noy1oedxaDdazvrLu7YlyQOWA +M1curbqJa6ozPAwc7T8XSwWxIuFfo9rStHQE3QUARxIdziQKTtlAbXI2mQU99c6x +vSueQ/gq3ICFRBwCmPAm+JCwZG+cDLJJ/g6wEilNATSFdakbMX4lHUB2X0qradNO +J66pdZWxTCxRLomPBWa5JEPanbosaJk0+n9+P6ImPiWpt8wiu0Qzfzo7loXiDxo/ +0G8fSbjYsIF+skY+zhNbY1MenfIPctB9X5iyW291mWW7rhhZyuqqxN2xnmPPgFmi +QGd+8KVodadHABEBAAGJATwEGAECACYCGwwWIQSRpuf4XQXGVjC+8YlRhS2HNI/8 +TAUCXn0BRAUJEvOKdwAKCRBRhS2HNI/8TEzUB/9pEHVwtTxL8+VRq559Q0tPOIOb +h3b+GroZRQGq/tcQDVbYOO6cyRMR9IohVJk0b9wnnUHoZpoA4H79UUfIB4sZngma +enL/9magP1uAHxPxEa5i/yYqR0MYfz4+PGdvqyj91NrkZm3WIpwzqW/KZp8YnD77 +VzGVodT8xqAoHW+bHiza9Jmm9Rkf5/0i0JY7GXoJgk4QBG/Fcp0OR5NUWxN3PEM0 +dpeiU4GI5wOz5RAIOvSv7u1h0ZxMnJG4B4MKniIAr4yD7WYYZh/VxEPeiS/E1CVx +qHV5VVCoEIoYVHIuFIyFu1lIcei53VD6V690rmn0bp4A5hs+kErhThvkok3c +=+mCN +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go new file mode 100644 index 00000000..3dd69635 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go @@ -0,0 +1,62 @@ +package tfinstall + +import ( + "context" + "fmt" + "os/exec" + "strings" +) + +const baseURL = "https://releases.hashicorp.com/terraform" + +type ExecPathFinder interface { + ExecPath(context.Context) (string, error) +} + +func Find(ctx context.Context, opts ...ExecPathFinder) (string, error) { + var terraformPath string + + // go through the options in order + // until a valid terraform executable is found + for _, opt := range opts { + p, err := opt.ExecPath(ctx) + if err != nil { + return "", fmt.Errorf("unexpected error: %s", err) + } + + if p == "" { + // strategy did not locate an executable - fall through to next + continue + } else { + terraformPath = p + break + } + } + + if terraformPath == "" { + return "", fmt.Errorf("could not find terraform executable") + } + + err := runTerraformVersion(terraformPath) + if err != nil { + return "", fmt.Errorf("executable found at path %s is not terraform: %s", terraformPath, err) + } + + return terraformPath, nil +} + +func runTerraformVersion(execPath string) error { + cmd := exec.Command(execPath, "version") + + out, err := cmd.Output() + if err != nil { + return err + } + + // very basic sanity check + if !strings.Contains(string(out), "Terraform v") { + return fmt.Errorf("located executable at %s, but output of `terraform version` was:\n%s", execPath, out) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-json/.gitignore b/vendor/github.com/hashicorp/terraform-json/.gitignore new file mode 100644 index 00000000..15b499b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/.gitignore @@ -0,0 +1,3 @@ +.terraform +plan.tfplan +terraform.tfstate.backup diff --git 
a/vendor/github.com/hashicorp/terraform-json/LICENSE b/vendor/github.com/hashicorp/terraform-json/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/terraform-json/Makefile b/vendor/github.com/hashicorp/terraform-json/Makefile new file mode 100644 index 00000000..a279462b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/Makefile @@ -0,0 +1,20 @@ +GOTOOLS = \ + gotest.tools/gotestsum + +test: tools + gotestsum --format=short-verbose $(TEST) $(TESTARGS) + +generate: + cd testdata && make generate + +modules: + go mod download && go mod verify + +test-circle: + mkdir -p test-results/terraform-json + gotestsum --format=short-verbose --junitfile test-results/terraform-json/results.xml + +tools: + go install $(GOTOOLS) + +.PHONY: test generate modules test-circle tools diff --git a/vendor/github.com/hashicorp/terraform-json/README.md b/vendor/github.com/hashicorp/terraform-json/README.md new file mode 100644 index 00000000..78fd3bfa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/README.md @@ -0,0 +1,33 @@ +# terraform-json + +[![CircleCI](https://circleci.com/gh/hashicorp/terraform-json/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/terraform-json/tree/master) +[![GoDoc](https://godoc.org/github.com/hashicorp/terraform-json?status.svg)](https://godoc.org/github.com/hashicorp/terraform-json) + +This repository houses data types designed to help parse the data produced by +two [Terraform](https://www.terraform.io/) commands: + +* [`terraform show -json`](https://www.terraform.io/docs/commands/show.html#json-output) +* [`terraform providers schema -json`](https://www.terraform.io/docs/commands/providers/schema.html#json) + +While containing mostly data types, there are also a few helpers to assist with +working with the data. + +This repository also serves as de facto documentation for the formats produced +by these commands. For more details, see the +[GoDoc](https://godoc.org/github.com/hashicorp/terraform-json). + +## Why a Separate Repository? + +To reduce dependencies on any of Terraform core's internals, we've made a design +decision to make any helpers or libraries that work with the external JSON data +external and not a part of the Terraform GitHub repository itself. + +While Terraform core will change often and be relatively unstable, this library +will see a smaller amount of change. Most of the major changes have already +happened leading up to 0.12, so you can expect this library to only see minor +incremental changes going forward. + +For this reason, `terraform show -json` and `terraform providers schema -json` +is the recommended format for working with Terraform data externally, and as +such, if you require any help working with the data in these formats, or even a +reference of how the JSON is formatted, use this repository. diff --git a/vendor/github.com/hashicorp/terraform-json/action.go b/vendor/github.com/hashicorp/terraform-json/action.go new file mode 100644 index 00000000..51c4c836 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/action.go @@ -0,0 +1,104 @@ +package tfjson + +// Action is a valid action type for a resource change. +// +// Note that a singular Action is not telling of a full resource +// change operation. Certain resource actions, such as replacement, +// are a composite of more than one type. See the Actions type and +// its helpers for more information. +type Action string + +const ( + // ActionNoop denotes a no-op operation. + ActionNoop Action = "no-op" + + // ActionCreate denotes a create operation. + ActionCreate Action = "create" + + // ActionRead denotes a read operation. 
+ ActionRead Action = "read" + + // ActionUpdate denotes an update operation. + ActionUpdate Action = "update" + + // ActionDelete denotes a delete operation. + ActionDelete Action = "delete" +) + +// Actions denotes a valid change type. +type Actions []Action + +// NoOp is true if this set of Actions denotes a no-op. +func (a Actions) NoOp() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionNoop +} + +// Create is true if this set of Actions denotes creation of a new +// resource. +func (a Actions) Create() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionCreate +} + +// Read is true if this set of Actions denotes a read operation only. +func (a Actions) Read() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionRead +} + +// Update is true if this set of Actions denotes an update operation. +func (a Actions) Update() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionUpdate +} + +// Delete is true if this set of Actions denotes resource removal. +func (a Actions) Delete() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionDelete +} + +// DestroyBeforeCreate is true if this set of Actions denotes a +// destroy-before-create operation. This is the standard resource +// replacement method. +func (a Actions) DestroyBeforeCreate() bool { + if len(a) != 2 { + return false + } + + return a[0] == ActionDelete && a[1] == ActionCreate +} + +// CreateBeforeDestroy is true if this set of Actions denotes a +// create-before-destroy operation, usually the result of replacement +// to a resource that has the create_before_destroy lifecycle option +// set. +func (a Actions) CreateBeforeDestroy() bool { + if len(a) != 2 { + return false + } + + return a[0] == ActionCreate && a[1] == ActionDelete +} + +// Replace is true if this set of Actions denotes a valid replacement +// operation. +func (a Actions) Replace() bool { + return a.DestroyBeforeCreate() || a.CreateBeforeDestroy() +} diff --git a/vendor/github.com/hashicorp/terraform-json/config.go b/vendor/github.com/hashicorp/terraform-json/config.go new file mode 100644 index 00000000..36e53e6e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/config.go @@ -0,0 +1,188 @@ +package tfjson + +import ( + "encoding/json" + "errors" +) + +// Config represents the complete configuration source. +type Config struct { + // A map of all provider instances across all modules in the + // configuration. + // + // The index for this field is opaque and should not be parsed. Use + // the individual fields in ProviderConfig to discern actual data + // about the provider such as name, alias, or defined module. + ProviderConfigs map[string]*ProviderConfig `json:"provider_config,omitempty"` + + // The root module in the configuration. Any child modules descend + // off of here. + RootModule *ConfigModule `json:"root_module,omitempty"` +} + +// Validate checks to ensure that the config is present. +func (c *Config) Validate() error { + if c == nil { + return errors.New("config is nil") + } + + return nil +} + +func (c *Config) UnmarshalJSON(b []byte) error { + type rawConfig Config + var config rawConfig + + err := json.Unmarshal(b, &config) + if err != nil { + return err + } + + *c = *(*Config)(&config) + + return c.Validate() +} + +// ProviderConfig describes a provider configuration instance. +type ProviderConfig struct { + // The name of the provider, ie: "aws". + Name string `json:"name,omitempty"` + + // The alias of the provider, ie: "us-east-1". 
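[Editor's example] The `Actions` helpers above classify the action list that `terraform show -json` reports for each resource change. A minimal, hedged sketch of how a consumer might use them; the action list is illustrative:

```go
package main

import (
	"fmt"

	tfjson "github.com/hashicorp/terraform-json"
)

func main() {
	// A delete followed by a create is the standard replacement order.
	actions := tfjson.Actions{tfjson.ActionDelete, tfjson.ActionCreate}

	fmt.Println(actions.Replace())             // true
	fmt.Println(actions.DestroyBeforeCreate()) // true
	fmt.Println(actions.CreateBeforeDestroy()) // false
	fmt.Println(actions.NoOp())                // false
}
```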
+ Alias string `json:"alias,omitempty"` + + // The address of the module the provider is declared in. + ModuleAddress string `json:"module_address,omitempty"` + + // Any non-special configuration values in the provider, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` + + // The defined version constraint for this provider. + VersionConstraint string `json:"version_constraint,omitempty"` +} + +// ConfigModule describes a module in Terraform configuration. +type ConfigModule struct { + // The outputs defined in the module. + Outputs map[string]*ConfigOutput `json:"outputs,omitempty"` + + // The resources defined in the module. + Resources []*ConfigResource `json:"resources,omitempty"` + + // Any "module" stanzas within the specific module. + ModuleCalls map[string]*ModuleCall `json:"module_calls,omitempty"` + + // The variables defined in the module. + Variables map[string]*ConfigVariable `json:"variables,omitempty"` +} + +// ConfigOutput defines an output as defined in configuration. +type ConfigOutput struct { + // Indicates whether or not the output was marked as sensitive. + Sensitive bool `json:"sensitive,omitempty"` + + // The defined value of the output. + Expression *Expression `json:"expression,omitempty"` + + // The defined description of this output. + Description string `json:"description,omitempty"` + + // The defined dependencies tied to this output. + DependsOn []string `json:"depends_on,omitempty"` +} + +// ConfigResource is the configuration representation of a resource. +type ConfigResource struct { + // The address of the resource relative to the module that it is + // in. + Address string `json:"address,omitempty"` + + // The resource mode. + Mode ResourceMode `json:"mode,omitempty"` + + // The type of resource, ie: "null_resource" in + // "null_resource.foo". + Type string `json:"type,omitempty"` + + // The name of the resource, ie: "foo" in "null_resource.foo". + Name string `json:"name,omitempty"` + + // An opaque key representing the provider configuration this + // module uses. Note that there are more than one circumstance that + // this key will not match what is found in the ProviderConfigs + // field in the root Config structure, and as such should not be + // relied on for that purpose. + ProviderConfigKey string `json:"provider_config_key,omitempty"` + + // The list of provisioner defined for this configuration. This + // will be nil if no providers are defined. + Provisioners []*ConfigProvisioner `json:"provisioners,omitempty"` + + // Any non-special configuration values in the resource, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` + + // The resource's configuration schema version. With access to the + // specific Terraform provider for this resource, this can be used + // to determine the correct schema for the configuration data + // supplied in Expressions. + SchemaVersion uint64 `json:"schema_version"` + + // The expression data for the "count" value in the resource. + CountExpression *Expression `json:"count_expression,omitempty"` + + // The expression data for the "for_each" value in the resource. + ForEachExpression *Expression `json:"for_each_expression,omitempty"` + + // The contents of the "depends_on" config directive, which + // declares explicit dependencies for this resource. + DependsOn []string `json:"depends_on,omitempty"` +} + +// ConfigVariable defines a variable as defined in configuration. 
+type ConfigVariable struct { + // The defined default value of the variable. + Default interface{} `json:"default,omitempty"` + + // The defined text description of the variable. + Description string `json:"description,omitempty"` +} + +// ConfigProvisioner describes a provisioner declared in a resource +// configuration. +type ConfigProvisioner struct { + // The type of the provisioner, ie: "local-exec". + Type string `json:"type,omitempty"` + + // Any non-special configuration values in the provisioner, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` +} + +// ModuleCall describes a declared "module" within a configuration. +// It also contains the data for the module itself. +type ModuleCall struct { + // The contents of the "source" field. + Source string `json:"source,omitempty"` + + // Any non-special configuration values in the module, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` + + // The expression data for the "count" value in the module. + CountExpression *Expression `json:"count_expression,omitempty"` + + // The expression data for the "for_each" value in the module. + ForEachExpression *Expression `json:"for_each_expression,omitempty"` + + // The configuration data for the module itself. + Module *ConfigModule `json:"module,omitempty"` + + // The version constraint for modules that come from the registry. + VersionConstraint string `json:"version_constraint,omitempty"` + + // The explicit resource dependencies for the "depends_on" value. + // As it must be a slice of references, Expression is not used. + DependsOn []string `json:"depends_on,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/expression.go b/vendor/github.com/hashicorp/terraform-json/expression.go new file mode 100644 index 00000000..8a39face --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/expression.go @@ -0,0 +1,127 @@ +package tfjson + +import "encoding/json" + +type unknownConstantValue struct{} + +// UnknownConstantValue is a singleton type that denotes that a +// constant value is explicitly unknown. This is set during an +// unmarshal when references are found in an expression to help more +// explicitly differentiate between an explicit null and unknown +// value. +var UnknownConstantValue = &unknownConstantValue{} + +// Expression describes the format for an individual key in a +// Terraform configuration. +// +// This struct wraps ExpressionData to support custom JSON parsing. +type Expression struct { + *ExpressionData +} + +// ExpressionData describes the format for an individual key in a +// Terraform configuration. +type ExpressionData struct { + // If the *entire* expression is a constant-defined value, this + // will contain the Go representation of the expression's data. + // + // Note that a nil here denotes and explicit null. When a value is + // unknown on part of the value coming from an expression that + // cannot be resolved at parse time, this field will contain + // UnknownConstantValue. + ConstantValue interface{} `json:"constant_value,omitempty"` + + // If any part of the expression contained values that were not + // able to be resolved at parse-time, this will contain a list of + // the referenced identifiers that caused the value to be unknown. + References []string `json:"references,omitempty"` + + // A list of complex objects that were nested in this expression. 
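[Editor's example] `Config`, `ConfigModule`, and `ConfigResource` mirror the `configuration` section of a JSON plan. A hedged sketch of decoding that section and walking the root module's resources; the JSON fragment is made up for illustration, not real plan output:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	tfjson "github.com/hashicorp/terraform-json"
)

func main() {
	// Illustrative stand-in for the "configuration" portion of a JSON plan.
	raw := []byte(`{
	  "root_module": {
	    "resources": [
	      {"address": "alks_iamrole.test_role", "mode": "managed",
	       "type": "alks_iamrole", "name": "test_role"}
	    ]
	  }
	}`)

	var cfg tfjson.Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}

	for _, r := range cfg.RootModule.Resources {
		fmt.Printf("%s (%s %s)\n", r.Address, r.Mode, r.Type)
	}
}
```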
+ // If this value is a nested block in configuration, sometimes + // referred to as a "sub-resource", this field will contain those + // values, and ConstantValue and References will be blank. + NestedBlocks []map[string]*Expression `json:"-"` +} + +// UnmarshalJSON implements json.Unmarshaler for Expression. +func (e *Expression) UnmarshalJSON(b []byte) error { + result := new(ExpressionData) + + // Check to see if this is an array first. If it is, this is more + // than likely a list of nested blocks. + var rawNested []map[string]json.RawMessage + if err := json.Unmarshal(b, &rawNested); err == nil { + result.NestedBlocks, err = unmarshalExpressionBlocks(rawNested) + if err != nil { + return err + } + } else { + // It's a non-nested expression block, parse normally + if err := json.Unmarshal(b, &result); err != nil { + return err + } + + // If References is non-zero, then ConstantValue is unknown. Set + // this explicitly. + if len(result.References) > 0 { + result.ConstantValue = UnknownConstantValue + } + } + + e.ExpressionData = result + return nil +} + +func unmarshalExpressionBlocks(raw []map[string]json.RawMessage) ([]map[string]*Expression, error) { + var result []map[string]*Expression + + for _, rawBlock := range raw { + block := make(map[string]*Expression) + for k, rawExpr := range rawBlock { + var expr *Expression + if err := json.Unmarshal(rawExpr, &expr); err != nil { + return nil, err + } + + block[k] = expr + } + + result = append(result, block) + } + + return result, nil +} + +// MarshalJSON implements json.Marshaler for Expression. +func (e *Expression) MarshalJSON() ([]byte, error) { + switch { + case len(e.ExpressionData.NestedBlocks) > 0: + return marshalExpressionBlocks(e.ExpressionData.NestedBlocks) + + case e.ExpressionData.ConstantValue == UnknownConstantValue: + return json.Marshal(&ExpressionData{ + References: e.ExpressionData.References, + }) + } + + return json.Marshal(e.ExpressionData) +} + +func marshalExpressionBlocks(nested []map[string]*Expression) ([]byte, error) { + var rawNested []map[string]json.RawMessage + for _, block := range nested { + rawBlock := make(map[string]json.RawMessage) + for k, expr := range block { + raw, err := json.Marshal(expr) + if err != nil { + return nil, err + } + + rawBlock[k] = raw + } + + rawNested = append(rawNested, rawBlock) + } + + return json.Marshal(rawNested) +} diff --git a/vendor/github.com/hashicorp/terraform-json/go.mod b/vendor/github.com/hashicorp/terraform-json/go.mod new file mode 100644 index 00000000..415bf18b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/go.mod @@ -0,0 +1,9 @@ +module github.com/hashicorp/terraform-json + +go 1.13 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/go-cmp v0.3.1 + github.com/zclconf/go-cty v1.2.1 +) diff --git a/vendor/github.com/hashicorp/terraform-json/go.sum b/vendor/github.com/hashicorp/terraform-json/go.sum new file mode 100644 index 00000000..1bb69979 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/go.sum @@ -0,0 +1,19 @@ +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= +github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go new file mode 100644 index 00000000..2a8ebd21 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/plan.go @@ -0,0 +1,159 @@ +package tfjson + +import ( + "encoding/json" + "errors" + "fmt" +) + +// PlanFormatVersion is the version of the JSON plan format that is +// supported by this package. +const PlanFormatVersion = "0.1" + +// ResourceMode is a string representation of the resource type found +// in certain fields in the plan. +type ResourceMode string + +const ( + // DataResourceMode is the resource mode for data sources. + DataResourceMode ResourceMode = "data" + + // ManagedResourceMode is the resource mode for managed resources. + ManagedResourceMode ResourceMode = "managed" +) + +// Plan represents the entire contents of an output Terraform plan. +type Plan struct { + // The version of the plan format. This should always match the + // PlanFormatVersion constant in this package, or else an unmarshal + // will be unstable. + FormatVersion string `json:"format_version,omitempty"` + + // The version of Terraform used to make the plan. + TerraformVersion string `json:"terraform_version,omitempty"` + + // The variables set in the root module when creating the plan. + Variables map[string]*PlanVariable `json:"variables,omitempty"` + + // The common state representation of resources within this plan. + // This is a product of the existing state merged with the diff for + // this plan. + PlannedValues *StateValues `json:"planned_values,omitempty"` + + // The change operations for resources and data sources within this + // plan. + ResourceChanges []*ResourceChange `json:"resource_changes,omitempty"` + + // The change operations for outputs within this plan. + OutputChanges map[string]*Change `json:"output_changes,omitempty"` + + // The Terraform state prior to the plan operation. This is the + // same format as PlannedValues, without the current diff merged. + PriorState *State `json:"prior_state,omitempty"` + + // The Terraform configuration used to make the plan. 
+ Config *Config `json:"configuration,omitempty"` +} + +// Validate checks to ensure that the plan is present, and the +// version matches the version supported by this library. +func (p *Plan) Validate() error { + if p == nil { + return errors.New("plan is nil") + } + + if p.FormatVersion == "" { + return errors.New("unexpected plan input, format version is missing") + } + + if PlanFormatVersion != p.FormatVersion { + return fmt.Errorf("unsupported plan format version: expected %q, got %q", PlanFormatVersion, p.FormatVersion) + } + + return nil +} + +func (p *Plan) UnmarshalJSON(b []byte) error { + type rawPlan Plan + var plan rawPlan + + err := json.Unmarshal(b, &plan) + if err != nil { + return err + } + + *p = *(*Plan)(&plan) + + return p.Validate() +} + +// ResourceChange is a description of an individual change action +// that Terraform plans to use to move from the prior state to a new +// state matching the configuration. +type ResourceChange struct { + // The absolute resource address. + Address string `json:"address,omitempty"` + + // The module portion of the above address. Omitted if the instance + // is in the root module. + ModuleAddress string `json:"module_address,omitempty"` + + // The resource mode. + Mode ResourceMode `json:"mode,omitempty"` + + // The resource type, example: "aws_instance" for aws_instance.foo. + Type string `json:"type,omitempty"` + + // The resource name, example: "foo" for aws_instance.foo. + Name string `json:"name,omitempty"` + + // The instance key for any resources that have been created using + // "count" or "for_each". If neither of these apply the key will be + // empty. + // + // This value can be either an integer (int) or a string. + Index interface{} `json:"index,omitempty"` + + // The name of the provider this resource belongs to. This allows + // the provider to be interpreted unambiguously in the unusual + // situation where a provider offers a resource type whose name + // does not start with its own name, such as the "googlebeta" + // provider offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // An identifier used during replacement operations, and can be + // used to identify the exact resource being replaced in state. + DeposedKey string `json:"deposed,omitempty"` + + // The data describing the change that will be made to this object. + Change *Change `json:"change,omitempty"` +} + +// Change is the representation of a proposed change for an object. +type Change struct { + // The action to be carried out by this change. + Actions Actions `json:"actions,omitempty"` + + // Before and After are representations of the object value both + // before and after the action. For create and delete actions, + // either Before or After is unset (respectively). For no-op + // actions, both values will be identical. After will be incomplete + // if there are values within it that won't be known until after + // apply. + Before interface{} `json:"before,"` + After interface{} `json:"after,omitempty"` + + // A deep object of booleans that denotes any values that are + // unknown in a resource. These values were previously referred to + // as "computed" values. + // + // If the value cannot be found in this map, then its value should + // be available within After, so long as the operation supports it. + AfterUnknown interface{} `json:"after_unknown,omitempty"` +} + +// PlanVariable is a top-level variable in the Terraform plan. +type PlanVariable struct { + // The value for this variable at plan time. 
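[Editor's example] `Plan.UnmarshalJSON` rejects input whose `format_version` does not match `PlanFormatVersion`, so decoding and validation happen in one step. A minimal, hedged sketch of summarizing the resource changes in `terraform show -json` output; the payload is a trimmed, illustrative fragment:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	tfjson "github.com/hashicorp/terraform-json"
)

func main() {
	// Trimmed, illustrative output of `terraform show -json tfplan`.
	raw := []byte(`{
	  "format_version": "0.1",
	  "terraform_version": "0.12.26",
	  "resource_changes": [
	    {"address": "alks_iamrole.test_role",
	     "change": {"actions": ["delete", "create"]}}
	  ]
	}`)

	var plan tfjson.Plan
	if err := json.Unmarshal(raw, &plan); err != nil {
		log.Fatal(err) // fails if format_version is missing or unsupported
	}

	for _, rc := range plan.ResourceChanges {
		fmt.Printf("%s: %v (replace=%t)\n",
			rc.Address, rc.Change.Actions, rc.Change.Actions.Replace())
	}
}
```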
+ Value interface{} `json:"value,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go new file mode 100644 index 00000000..e688c172 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/schemas.go @@ -0,0 +1,193 @@ +package tfjson + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// ProviderSchemasFormatVersion is the version of the JSON provider +// schema format that is supported by this package. +const ProviderSchemasFormatVersion = "0.1" + +// ProviderSchemas represents the schemas of all providers and +// resources in use by the configuration. +type ProviderSchemas struct { + // The version of the plan format. This should always match the + // ProviderSchemasFormatVersion constant in this package, or else + // an unmarshal will be unstable. + FormatVersion string `json:"format_version,omitempty"` + + // The schemas for the providers in this configuration, indexed by + // provider type. Aliases are not included, and multiple instances + // of a provider in configuration will be represented by a single + // provider here. + Schemas map[string]*ProviderSchema `json:"provider_schemas,omitempty"` +} + +// Validate checks to ensure that ProviderSchemas is present, and the +// version matches the version supported by this library. +func (p *ProviderSchemas) Validate() error { + if p == nil { + return errors.New("provider schema data is nil") + } + + if p.FormatVersion == "" { + return errors.New("unexpected provider schema data, format version is missing") + } + + if ProviderSchemasFormatVersion != p.FormatVersion { + return fmt.Errorf("unsupported provider schema data format version: expected %q, got %q", PlanFormatVersion, p.FormatVersion) + } + + return nil +} + +func (p *ProviderSchemas) UnmarshalJSON(b []byte) error { + type rawSchemas ProviderSchemas + var schemas rawSchemas + + err := json.Unmarshal(b, &schemas) + if err != nil { + return err + } + + *p = *(*ProviderSchemas)(&schemas) + + return p.Validate() +} + +// ProviderSchema is the JSON representation of the schema of an +// entire provider, including the provider configuration and any +// resources and data sources included with the provider. +type ProviderSchema struct { + // The schema for the provider's configuration. + ConfigSchema *Schema `json:"provider,omitempty"` + + // The schemas for any resources in this provider. + ResourceSchemas map[string]*Schema `json:"resource_schemas,omitempty"` + + // The schemas for any data sources in this provider. + DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"` +} + +// Schema is the JSON representation of a particular schema +// (provider configuration, resources, data sources). +type Schema struct { + // The version of the particular resource schema. + Version uint64 `json:"version"` + + // The root-level block of configuration values. + Block *SchemaBlock `json:"block,omitempty"` +} + +// SchemaDescriptionKind describes the format type for a particular description's field. +type SchemaDescriptionKind string + +const ( + // SchemaDescriptionKindPlain indicates a string in plain text format. + SchemaDescriptionKindPlain SchemaDescriptionKind = "plain" + + // SchemaDescriptionKindMarkdown indicates a Markdown string and may need to be + // processed prior to presentation. + SchemaDescriptionKindMarkdown SchemaDescriptionKind = "markdown" +) + +// SchemaBlock represents a nested block within a particular schema. 
+type SchemaBlock struct { + // The attributes defined at the particular level of this block. + Attributes map[string]*SchemaAttribute `json:"attributes,omitempty"` + + // Any nested blocks within this particular block. + NestedBlocks map[string]*SchemaBlockType `json:"block_types,omitempty"` + + // The description for this block and format of the description. If + // no kind is provided, it can be assumed to be plain text. + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + + // If true, this block is deprecated. + Deprecated bool `json:"deprecated,omitempty"` +} + +// SchemaNestingMode is the nesting mode for a particular nested +// schema block. +type SchemaNestingMode string + +const ( + // SchemaNestingModeSingle denotes single block nesting mode, which + // allows a single block of this specific type only in + // configuration. This is generally the same as list or set types + // with a single-element constraint. + SchemaNestingModeSingle SchemaNestingMode = "single" + + // SchemaNestingModeList denotes list block nesting mode, which + // allows an ordered list of blocks where duplicates are allowed. + SchemaNestingModeList SchemaNestingMode = "list" + + // SchemaNestingModeSet denotes set block nesting mode, which + // allows an unordered list of blocks where duplicates are + // generally not allowed. What is considered a duplicate is up to + // the rules of the set itself, which may or may not cover all + // fields in the block. + SchemaNestingModeSet SchemaNestingMode = "set" + + // SchemaNestingModeMap denotes map block nesting mode. This + // creates a map of all declared blocks of the block type within + // the parent, keying them on the label supplied in the block + // declaration. This allows for blocks to be declared in the same + // style as resources. + SchemaNestingModeMap SchemaNestingMode = "map" +) + +// SchemaBlockType describes a nested block within a schema. +type SchemaBlockType struct { + // The nesting mode for this block. + NestingMode SchemaNestingMode `json:"nesting_mode,omitempty"` + + // The block data for this block type, including attributes and + // subsequent nested blocks. + Block *SchemaBlock `json:"block,omitempty"` + + // The lower limit on items that can be declared of this block + // type. + MinItems uint64 `json:"min_items,omitempty"` + + // The upper limit on items that can be declared of this block + // type. + MaxItems uint64 `json:"max_items,omitempty"` +} + +// SchemaAttribute describes an attribute within a schema block. +type SchemaAttribute struct { + // The attribute type. + AttributeType cty.Type `json:"type,omitempty"` + + // The description field for this attribute. If no kind is + // provided, it can be assumed to be plain text. + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + + // If true, this attribute is deprecated. + Deprecated bool `json:"deprecated,omitempty"` + + // If true, this attribute is required - it has to be entered in + // configuration. + Required bool `json:"required,omitempty"` + + // If true, this attribute is optional - it does not need to be + // entered in configuration. + Optional bool `json:"optional,omitempty"` + + // If true, this attribute is computed - it can be set by the + // provider. It may also be set by configuration if Optional is + // true. 
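[Editor's example] `ProviderSchemas` applies the same format-version check to `terraform providers schema -json` output. A hedged sketch of listing which attributes of one resource schema are required or optional; the provider address and schema fragment are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	tfjson "github.com/hashicorp/terraform-json"
)

func main() {
	// Illustrative fragment of `terraform providers schema -json` output.
	raw := []byte(`{
	  "format_version": "0.1",
	  "provider_schemas": {
	    "example/alks": {
	      "resource_schemas": {
	        "alks_iamrole": {
	          "version": 0,
	          "block": {
	            "attributes": {
	              "name": {"type": "string", "required": true},
	              "type": {"type": "string", "optional": true}
	            }
	          }
	        }
	      }
	    }
	  }
	}`)

	var schemas tfjson.ProviderSchemas
	if err := json.Unmarshal(raw, &schemas); err != nil {
		log.Fatal(err)
	}

	block := schemas.Schemas["example/alks"].ResourceSchemas["alks_iamrole"].Block
	for name, attr := range block.Attributes {
		fmt.Printf("%s required=%t optional=%t\n", name, attr.Required, attr.Optional)
	}
}
```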
+ Computed bool `json:"computed,omitempty"` + + // If true, this attribute is sensitive and will not be displayed + // in logs. Future versions of Terraform may encrypt or otherwise + // treat these values with greater care than non-sensitive fields. + Sensitive bool `json:"sensitive,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/state.go b/vendor/github.com/hashicorp/terraform-json/state.go new file mode 100644 index 00000000..e1a9149c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/state.go @@ -0,0 +1,160 @@ +package tfjson + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" +) + +// StateFormatVersion is the version of the JSON state format that is +// supported by this package. +const StateFormatVersion = "0.1" + +// State is the top-level representation of a Terraform state. +type State struct { + // useJSONNumber opts into the behavior of calling + // json.Decoder.UseNumber prior to decoding the state, which turns + // numbers into json.Numbers instead of float64s. Set it using + // State.UseJSONNumber. + useJSONNumber bool + + // The version of the state format. This should always match the + // StateFormatVersion constant in this package, or else am + // unmarshal will be unstable. + FormatVersion string `json:"format_version,omitempty"` + + // The Terraform version used to make the state. + TerraformVersion string `json:"terraform_version,omitempty"` + + // The values that make up the state. + Values *StateValues `json:"values,omitempty"` +} + +// UseJSONNumber controls whether the State will be decoded using the +// json.Number behavior or the float64 behavior. When b is true, the State will +// represent numbers in StateOutputs as json.Numbers. When b is false, the +// State will represent numbers in StateOutputs as float64s. +func (s *State) UseJSONNumber(b bool) { + s.useJSONNumber = b +} + +// Validate checks to ensure that the state is present, and the +// version matches the version supported by this library. +func (s *State) Validate() error { + if s == nil { + return errors.New("state is nil") + } + + if s.FormatVersion == "" { + return errors.New("unexpected state input, format version is missing") + } + + if StateFormatVersion != s.FormatVersion { + return fmt.Errorf("unsupported state format version: expected %q, got %q", StateFormatVersion, s.FormatVersion) + } + + return nil +} + +func (s *State) UnmarshalJSON(b []byte) error { + type rawState State + var state rawState + + dec := json.NewDecoder(bytes.NewReader(b)) + if s.useJSONNumber { + dec.UseNumber() + } + err := dec.Decode(&state) + if err != nil { + return err + } + + *s = *(*State)(&state) + + return s.Validate() +} + +// StateValues is the common representation of resolved values for both the +// prior state (which is always complete) and the planned new state. +type StateValues struct { + // The Outputs for this common state representation. + Outputs map[string]*StateOutput `json:"outputs,omitempty"` + + // The root module in this state representation. + RootModule *StateModule `json:"root_module,omitempty"` +} + +// StateModule is the representation of a module in the common state +// representation. This can be the root module or a child module. +type StateModule struct { + // All resources or data sources within this module. + Resources []*StateResource `json:"resources,omitempty"` + + // The absolute module address, omitted for the root module. + Address string `json:"address,omitempty"` + + // Any child modules within this module. 
+ ChildModules []*StateModule `json:"child_modules,omitempty"` +} + +// StateResource is the representation of a resource in the common +// state representation. +type StateResource struct { + // The absolute resource address. + Address string `json:"address,omitempty"` + + // The resource mode. + Mode ResourceMode `json:"mode,omitempty"` + + // The resource type, example: "aws_instance" for aws_instance.foo. + Type string `json:"type,omitempty"` + + // The resource name, example: "foo" for aws_instance.foo. + Name string `json:"name,omitempty"` + + // The instance key for any resources that have been created using + // "count" or "for_each". If neither of these apply the key will be + // empty. + // + // This value can be either an integer (int) or a string. + Index interface{} `json:"index,omitempty"` + + // The name of the provider this resource belongs to. This allows + // the provider to be interpreted unambiguously in the unusual + // situation where a provider offers a resource type whose name + // does not start with its own name, such as the "googlebeta" + // provider offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // The version of the resource type schema the "values" property + // conforms to. + SchemaVersion uint64 `json:"schema_version,"` + + // The JSON representation of the attribute values of the resource, + // whose structure depends on the resource type schema. Any unknown + // values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues map[string]interface{} `json:"values,omitempty"` + + // The addresses of the resources that this resource depends on. + DependsOn []string `json:"depends_on,omitempty"` + + // If true, the resource has been marked as tainted and will be + // re-created on the next update. + Tainted bool `json:"tainted,omitempty"` + + // DeposedKey is set if the resource instance has been marked Deposed and + // will be destroyed on the next apply. + DeposedKey string `json:"deposed_key,omitempty"` +} + +// StateOutput represents an output value in a common state +// representation. +type StateOutput struct { + // Whether or not the output was marked as sensitive. + Sensitive bool `json:"sensitive"` + + // The value of the output. + Value interface{} `json:"value,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/tfjson.go b/vendor/github.com/hashicorp/terraform-json/tfjson.go new file mode 100644 index 00000000..55f9ac44 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/tfjson.go @@ -0,0 +1,9 @@ +// Package tfjson is a de-coupled helper library containing types for +// the plan format output by "terraform show -json" command. This +// command is designed for the export of Terraform plan data in +// a format that can be easily processed by tools unrelated to +// Terraform. +// +// This format is stable and should be used over the binary plan data +// whenever possible. 
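[Editor's example] `State.UseJSONNumber` has to be called before decoding if output values should surface as `json.Number` instead of `float64`. A small, hedged sketch; the state fragment and output name are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	tfjson "github.com/hashicorp/terraform-json"
)

func main() {
	// Illustrative fragment of `terraform show -json` state output.
	raw := []byte(`{
	  "format_version": "0.1",
	  "values": {
	    "outputs": {
	      "max_session_duration": {"sensitive": false, "value": 3600}
	    }
	  }
	}`)

	var state tfjson.State
	state.UseJSONNumber(true) // must be set before unmarshalling

	if err := json.Unmarshal(raw, &state); err != nil {
		log.Fatal(err)
	}

	v := state.Values.Outputs["max_session_duration"].Value
	fmt.Printf("%v (%T)\n", v, v) // 3600 (json.Number) rather than float64
}
```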
+package tfjson diff --git a/vendor/github.com/hashicorp/terraform-json/validate.go b/vendor/github.com/hashicorp/terraform-json/validate.go new file mode 100644 index 00000000..7de13ad5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/validate.go @@ -0,0 +1,33 @@ +package tfjson + +// Pos represents a position in a config file +type Pos struct { + Line int `json:"line"` + Column int `json:"column"` + Byte int `json:"byte"` +} + +// Range represents a range of bytes between two positions +type Range struct { + Filename string `json:"filename"` + Start Pos `json:"start"` + End Pos `json:"end"` +} + +// Diagnostic represents information to be presented to a user about an +// error or anomaly in parsing or evaluating configuration +type Diagnostic struct { + Severity string `json:"severity,omitempty"` + Summary string `json:"summary,omitempty"` + Detail string `json:"detail,omitempty"` + Range *Range `json:"range,omitempty"` +} + +// ValidateOutput represents JSON output from terraform validate +// (available from 0.12 onwards) +type ValidateOutput struct { + Valid bool `json:"valid"` + ErrorCount int `json:"error_count"` + WarningCount int `json:"warning_count"` + Diagnostics []Diagnostic `json:"diagnostics"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/version.go b/vendor/github.com/hashicorp/terraform-json/version.go new file mode 100644 index 00000000..16f0a853 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/version.go @@ -0,0 +1,11 @@ +package tfjson + +// VersionOutput represents output from the version -json command +// added in v0.13 +type VersionOutput struct { + Version string `json:"terraform_version"` + Revision string `json:"terraform_revision"` + Platform string `json:"platform,omitempty"` + ProviderSelections map[string]string `json:"provider_selections"` + Outdated bool `json:"terraform_outdated"` +} diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE similarity index 100% rename from vendor/github.com/mitchellh/cli/LICENSE rename to vendor/github.com/hashicorp/terraform-plugin-go/LICENSE diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go new file mode 100644 index 00000000..bdeac863 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go @@ -0,0 +1,98 @@ +package tfprotov5 + +import ( + "context" +) + +// DataSourceServer is an interface containing the methods a data source +// implementation needs to fill. +type DataSourceServer interface { + // ValidateDataSourceConfig is called when Terraform is checking that a + // data source's configuration is valid. It is guaranteed to have types + // conforming to your schema, but it is not guaranteed that all values + // will be known. This is your opportunity to do custom or advanced + // validation prior to a plan being generated. + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfigRequest) (*ValidateDataSourceConfigResponse, error) + + // ReadDataSource is called when Terraform is refreshing a data + // source's state. + ReadDataSource(context.Context, *ReadDataSourceRequest) (*ReadDataSourceResponse, error) +} + +// ValidateDataSourceConfigRequest is the request Terraform sends when it wants +// to validate a data source's configuration. +type ValidateDataSourceConfigRequest struct { + // TypeName is the type of data source Terraform is validating. 
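[Editor's example] `ValidateOutput` and `VersionOutput` above map directly onto the JSON emitted by `terraform validate -json` and `terraform version -json`. A hedged sketch of decoding the version output; the sample JSON (version numbers, provider selection) is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	tfjson "github.com/hashicorp/terraform-json"
)

func main() {
	// Illustrative `terraform version -json` output (available from 0.13).
	raw := []byte(`{
	  "terraform_version": "0.14.7",
	  "platform": "darwin_amd64",
	  "provider_selections": {
	    "registry.terraform.io/cox-automotive/alks": "2.0.0"
	  },
	  "terraform_outdated": false
	}`)

	var out tfjson.VersionOutput
	if err := json.Unmarshal(raw, &out); err != nil {
		log.Fatal(err)
	}

	fmt.Println(out.Version, out.Platform, out.ProviderSelections)
}
```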
+ TypeName string + + // Config is the configuration the user supplied for that data source. + // See the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config *DynamicValue +} + +// ValidateDataSourceConfigResponse is the response from the provider about the +// validity of a data source's configuration. +type ValidateDataSourceConfigResponse struct { + // Diagnostics report errors or warnings related to the given + // configuration. Returning an empty slice indicates a successful + // validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ReadDataSourceRequest is the request Terraform sends when it wants to get +// the latest state for a data source. +type ReadDataSourceRequest struct { + // TypeName is the type of data source Terraform is requesting an + // updated state for. + TypeName string + + // Config is the configuration the user supplied for that data source. + // See the documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may have unknown values. + Config *DynamicValue + + // ProviderMeta supplies the provider metadata configuration for the + // module this data source is in. Module-specific provider metadata is + // an advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ReadDataSourceResponse is the response from the provider about the current +// state of the requested data source. +type ReadDataSourceResponse struct { + // State is the current state of the data source, represented as a + // `DynamicValue`. See the documentation on `DynamicValue` for + // information about safely creating the `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + State *DynamicValue + + // Diagnostics report errors or warnings related to retrieving the + // current state of the requested data source. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. 
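[Editor's example] The `DataSourceServer` interface above is what a tfprotov5-based provider fills in for each data source. A minimal, hedged sketch of a type satisfying it; the type name and responses are invented for illustration, and a real implementation would decode `req.Config` and return refreshed state:

```go
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
)

// exampleDataSource is a hypothetical data source used only to show the shape
// of a DataSourceServer implementation.
type exampleDataSource struct{}

func (d exampleDataSource) ValidateDataSourceConfig(ctx context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) {
	// No custom validation; an empty Diagnostics slice indicates success.
	return &tfprotov5.ValidateDataSourceConfigResponse{}, nil
}

func (d exampleDataSource) ReadDataSource(ctx context.Context, req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) {
	// A real implementation would unmarshal req.Config, call the backing API,
	// and return the refreshed state as a DynamicValue.
	return &tfprotov5.ReadDataSourceResponse{
		Diagnostics: []*tfprotov5.Diagnostic{{
			Severity: tfprotov5.DiagnosticSeverityWarning,
			Summary:  "Not implemented",
			Detail:   "This sketch returns no state.",
		}},
	}, nil
}

// Compile-time check that the sketch satisfies the interface.
var _ tfprotov5.DataSourceServer = exampleDataSource{}
```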
+ Diagnostics []*Diagnostic +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go new file mode 100644 index 00000000..04a39deb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go @@ -0,0 +1,58 @@ +package tfprotov5 + +import "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + +const ( + // DiagnosticSeverityInvalid is used to indicate an invalid + // `DiagnosticSeverity`. Provider developers should not use it. + DiagnosticSeverityInvalid DiagnosticSeverity = 0 + + // DiagnosticSeverityError is used to indicate that a `Diagnostic` + // represents an error and should halt Terraform execution. + DiagnosticSeverityError DiagnosticSeverity = 1 + + // DiagnosticSeverityWarning is used to indicate that a `Diagnostic` + // represents a warning and should not halt Terraform's execution, but + // it should be surfaced to the user. + DiagnosticSeverityWarning DiagnosticSeverity = 2 +) + +// Diagnostic is used to convey information back the user running Terraform. +type Diagnostic struct { + // Severity indicates how Terraform should handle the Diagnostic. + Severity DiagnosticSeverity + + // Summary is a brief description of the problem, roughly + // sentence-sized, and should provide a concise description of what + // went wrong. For example, a Summary could be as simple as "Invalid + // value.". + Summary string + + // Detail is a lengthier, more complete description of the problem. + // Detail should provide enough information that a user can resolve the + // problem entirely. For example, a Detail could be "Values must be + // alphanumeric and lowercase only." + Detail string + + // Attribute indicates which field, specifically, has the problem. Not + // setting this will indicate the entire resource; setting it will + // indicate that the problem is with a certain field in the resource, + // which helps users find the source of the problem. + Attribute *tftypes.AttributePath +} + +// DiagnosticSeverity represents different classes of Diagnostic which affect +// how Terraform handles the Diagnostics. +type DiagnosticSeverity int32 + +func (d DiagnosticSeverity) String() string { + switch d { + case 0: + return "INVALID" + case 1: + return "ERROR" + case 2: + return "WARNING" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go new file mode 100644 index 00000000..fdaf5cde --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go @@ -0,0 +1,29 @@ +// Package tfprotov5 provides the interfaces and types needed to build a +// Terraform provider server. +// +// All Terraform provider servers should be built on +// these types, to take advantage of the ecosystem and tooling built around +// them. +// +// These types are small wrappers around the Terraform protocol. It is assumed +// that developers using tfprotov5 are familiar with the protocol, its +// requirements, and its semantics. Developers not comfortable working with the +// raw protocol should use the github.com/hashicorp/terraform-plugin-sdk/v2 Go +// module instead, which offers a less verbose, safer way to develop a +// Terraform provider, albeit with less flexibility and power. +// +// Provider developers should start by defining a type that implements the +// `ProviderServer` interface. 
A struct is recommended, as it will allow you to +// store the configuration information attached to your provider for use in +// requests, but any type is technically possible. +// +// `ProviderServer` implementations will need to implement the composed +// interfaces, `ResourceServer` and `DataSourceServer`. It is recommended, but +// not required, to use an embedded `ResourceRouter` and `DataSourceRouter` in +// your `ProviderServer` to achieve this, which will let you handle requests +// for each resource and data source in a resource-specific or data +// source-specific function. +// +// To serve the `ProviderServer` implementation as a gRPC server that Terraform +// can connect to, use the `tfprotov5/server.Serve` function. +package tfprotov5 diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go new file mode 100644 index 00000000..951532cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go @@ -0,0 +1,91 @@ +package tfprotov5 + +import ( + "bytes" + "errors" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + "github.com/vmihailenco/msgpack" +) + +// ErrUnknownDynamicValueType is returned when a DynamicValue has no MsgPack or +// JSON bytes set. This should never be returned during the normal operation of +// a provider, and indicates one of the following: +// +// 1. terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terrafrom-plugin-go has a bug. +// +// 3. The `DynamicValue` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownDynamicValueType = errors.New("DynamicValue had no JSON or msgpack data set") + +// NewDynamicValue creates a DynamicValue from a tftypes.Value. You must +// specify the tftype.Type you want to send the value as, and it must be a type +// that is compatible with the Type of the Value. Usually it should just be the +// Type of the Value, but it can also be the DynamicPseudoType. +func NewDynamicValue(t tftypes.Type, v tftypes.Value) (DynamicValue, error) { + b, err := v.MarshalMsgPack(t) //nolint:staticcheck + if err != nil { + return DynamicValue{}, err + } + return DynamicValue{ + MsgPack: b, + }, nil +} + +// DynamicValue represents a nested encoding value that came from the protocol. +// The only way providers should ever interact with it is by calling its +// `Unmarshal` method to retrive a `tftypes.Value`. Although the type system +// allows for other interactions, they are explicitly not supported, and will +// not be considered when evaluating for breaking changes. Treat this type as +// an opaque value, and *only* call its `Unmarshal` method. +type DynamicValue struct { + MsgPack []byte + JSON []byte +} + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the DynamicValue in an easy-to-interact-with way. It is the +// main purpose of the DynamicValue type, and is how provider developers should +// obtain config, state, and other values from the protocol. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same, as do user-specified values when the provider has a +// DynamicPseudoType in its schema. 
Objects and maps all look the same, as +// well, as do DynamicPseudoType values sometimes. Fortunately, the provider +// should already know the type; it should be the type of the schema, or +// PseudoDynamicType if that's what's in the schema. `Unmarshal` will then +// parse the value as though it belongs to that type, if possible, and return a +// `tftypes.Value` with the appropriate information. If the data can't be +// interpreted as that type, an error will be returned saying so. In these +// cases, double check to make sure the schema is declaring the same type being +// passed into `Unmarshal`. +// +// In the event an ErrUnknownDynamicValueType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `DynamicValue` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `DynamicValue`s +// received from RPC requests. +func (d DynamicValue) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if d.JSON != nil { + return jsonUnmarshal(d.JSON, typ, tftypes.AttributePath{}) + } + if d.MsgPack != nil { + r := bytes.NewReader(d.MsgPack) + dec := msgpack.NewDecoder(r) + return msgpackUnmarshal(dec, typ, tftypes.AttributePath{}) + } + return tftypes.Value{}, ErrUnknownDynamicValueType +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value_json.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value_json.go new file mode 100644 index 00000000..0d3dbc25 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value_json.go @@ -0,0 +1,480 @@ +package tfprotov5 + +import ( + "bytes" + "encoding/json" + "math/big" + "strings" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +func jsonByteDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + return dec +} + +func jsonUnmarshal(buf []byte, typ tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + + if tok == nil { + return tftypes.NewValue(typ, nil), nil + } + + switch { + case typ.Is(tftypes.String): + return jsonUnmarshalString(buf, typ, p) + case typ.Is(tftypes.Number): + return jsonUnmarshalNumber(buf, typ, p) + case typ.Is(tftypes.Bool): + return jsonUnmarshalBool(buf, typ, p) + case typ.Is(tftypes.DynamicPseudoType): + return jsonUnmarshalDynamicPseudoType(buf, typ, p) + case typ.Is(tftypes.List{}): + return jsonUnmarshalList(buf, typ.(tftypes.List).ElementType, p) + case typ.Is(tftypes.Set{}): + return jsonUnmarshalSet(buf, typ.(tftypes.Set).ElementType, p) + + case typ.Is(tftypes.Map{}): + return jsonUnmarshalMap(buf, typ.(tftypes.Map).AttributeType, p) + case typ.Is(tftypes.Tuple{}): + return jsonUnmarshalTuple(buf, typ.(tftypes.Tuple).ElementTypes, p) + case typ.Is(tftypes.Object{}): + return jsonUnmarshalObject(buf, typ.(tftypes.Object).AttributeTypes, p) + } + return tftypes.Value{}, p.NewErrorf("unknown type %s", typ) +} + +func jsonUnmarshalString(buf []byte, typ tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) 
{ + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + switch v := tok.(type) { + case string: + return tftypes.NewValue(tftypes.String, v), nil + case json.Number: + return tftypes.NewValue(tftypes.String, string(v)), nil + case bool: + if v { + return tftypes.NewValue(tftypes.String, "true"), nil + } + return tftypes.NewValue(tftypes.String, "false"), nil + } + return tftypes.Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, tftypes.String) +} + +func jsonUnmarshalNumber(buf []byte, typ tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + switch numTok := tok.(type) { + case json.Number: + f, _, err := big.ParseFloat(string(numTok), 10, 512, big.ToNearestEven) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error parsing number: %w", err) + } + return tftypes.NewValue(typ, f), nil + case string: + f, _, err := big.ParseFloat(string(numTok), 10, 512, big.ToNearestEven) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error parsing number: %w", err) + } + return tftypes.NewValue(typ, f), nil + } + return tftypes.Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, tftypes.Number) +} + +func jsonUnmarshalBool(buf []byte, typ tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + switch v := tok.(type) { + case bool: + return tftypes.NewValue(tftypes.Bool, v), nil + case string: + switch v { + case "true", "1": + return tftypes.NewValue(tftypes.Bool, true), nil + case "false", "0": + return tftypes.NewValue(tftypes.Bool, false), nil + } + switch strings.ToLower(string(v)) { + case "true": + return tftypes.Value{}, p.NewErrorf("to convert from string, use lowercase \"true\"") + case "false": + return tftypes.Value{}, p.NewErrorf("to convert from string, use lowercase \"false\"") + } + case json.Number: + switch v { + case "1": + return tftypes.NewValue(tftypes.Bool, true), nil + case "0": + return tftypes.NewValue(tftypes.Bool, false), nil + } + } + return tftypes.Value{}, p.NewErrorf("unsupported type %T sent as %s", tok, tftypes.Bool) +} + +func jsonUnmarshalDynamicPseudoType(buf []byte, typ tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('{') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) + } + var t tftypes.Type + var valBody []byte + for dec.More() { + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + key, ok := tok.(string) + if !ok { + return tftypes.Value{}, p.NewErrorf("expected key to be a string, got %T", tok) + } + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding value: %w", err) + } + switch key { + case "type": + t, err = tftypes.ParseJSONType(rawVal) //nolint:staticcheck + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding type information: %w", err) + } + case "value": + valBody = rawVal + default: + return tftypes.Value{}, 
p.NewErrorf("invalid key %q in dynamically-typed value", key) + } + } + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('}') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) + } + if t == nil { + return tftypes.Value{}, p.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return tftypes.Value{}, p.NewErrorf("missing value in dynamically-typed value") + } + return jsonUnmarshal(valBody, t, p) +} + +func jsonUnmarshalList(buf []byte, elementType tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('[') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('['), tok) + } + + // we want to have a value for this always, even if there are no + // elements, because no elements is *technically* different than empty, + // and we want to preserve that distinction + // + // var vals []tftypes.Value + // would evaluate as nil if the list is empty + // + // while generally in Go it's undesirable to treat empty and nil slices + // separately, in this case we're surfacing a non-Go-in-origin + // distinction, so we'll allow it. + vals := []tftypes.Value{} + + var idx int64 + for dec.More() { + p.WithElementKeyInt(idx) + // update the index + idx++ + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, elementType, p) + if err != nil { + return tftypes.Value{}, err + } + vals = append(vals, val) + p.WithoutLastStep() + } + + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim(']') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim(']'), tok) + } + + elTyp := elementType + if elTyp == tftypes.DynamicPseudoType { + elTyp, err = tftypes.TypeFromElements(vals) + if err != nil { + return tftypes.Value{}, p.NewErrorf("invalid elements for list: %w", err) + } + } + return tftypes.NewValue(tftypes.List{ + ElementType: elTyp, + }, vals), nil +} + +func jsonUnmarshalSet(buf []byte, elementType tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('[') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('['), tok) + } + + // we want to have a value for this always, even if there are no + // elements, because no elements is *technically* different than empty, + // and we want to preserve that distinction + // + // var vals []tftypes.Value + // would evaluate as nil if the set is empty + // + // while generally in Go it's undesirable to treat empty and nil slices + // separately, in this case we're surfacing a non-Go-in-origin + // distinction, so we'll allow it. 
+ vals := []tftypes.Value{} + + for dec.More() { + p.WithElementKeyValue(tftypes.NewValue(elementType, tftypes.UnknownValue)) + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, elementType, p) + if err != nil { + return tftypes.Value{}, err + } + vals = append(vals, val) + p.WithoutLastStep() + } + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim(']') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim(']'), tok) + } + + elTyp := elementType + if elTyp == tftypes.DynamicPseudoType { + elTyp, err = tftypes.TypeFromElements(vals) + if err != nil { + return tftypes.Value{}, p.NewErrorf("invalid elements for list: %w", err) + } + } + return tftypes.NewValue(tftypes.Set{ + ElementType: elTyp, + }, vals), nil +} + +func jsonUnmarshalMap(buf []byte, attrType tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('{') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) + } + + vals := map[string]tftypes.Value{} + for dec.More() { + p.WithElementKeyValue(tftypes.NewValue(attrType, tftypes.UnknownValue)) + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + key, ok := tok.(string) + if !ok { + return tftypes.Value{}, p.NewErrorf("expected map key to be a string, got %T", tok) + } + + //fix the path value, we have an actual key now + p.WithoutLastStep() + p.WithElementKeyString(key) + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, attrType, p) + if err != nil { + return tftypes.Value{}, err + } + vals[key] = val + p.WithoutLastStep() + } + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('}') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) + } + + return tftypes.NewValue(tftypes.Map{ + AttributeType: attrType, + }, vals), nil +} + +func jsonUnmarshalTuple(buf []byte, elementTypes []tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('[') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('['), tok) + } + + // we want to have a value for this always, even if there are no + // elements, because no elements is *technically* different than empty, + // and we want to preserve that distinction + // + // var vals []tftypes.Value + // would evaluate as nil if the tuple is empty + // + // while generally in Go it's undesirable to treat empty and nil slices + // separately, in this case we're surfacing a non-Go-in-origin + // distinction, so we'll allow it. 
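+ // Unlike lists and sets, a tuple decodes each element against the type
+ // at the same index in elementTypes, so the JSON array length must match
+ // len(elementTypes) exactly; both too many and too few elements are
+ // rejected below.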
+ vals := []tftypes.Value{} + + var idx int64 + for dec.More() { + if idx >= int64(len(elementTypes)) { + return tftypes.Value{}, p.NewErrorf("too many tuple elements (only have types for %d)", len(elementTypes)) + } + + p.WithElementKeyInt(idx) + elementType := elementTypes[idx] + idx++ + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, elementType, p) + if err != nil { + return tftypes.Value{}, err + } + vals = append(vals, val) + p.WithoutLastStep() + } + + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim(']') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim(']'), tok) + } + + if len(vals) != len(elementTypes) { + return tftypes.Value{}, p.NewErrorf("not enough tuple elements (only have %d, have types for %d)", len(vals), len(elementTypes)) + } + + return tftypes.NewValue(tftypes.Tuple{ + ElementTypes: elementTypes, + }, vals), nil +} + +func jsonUnmarshalObject(buf []byte, attrTypes map[string]tftypes.Type, p tftypes.AttributePath) (tftypes.Value, error) { + dec := jsonByteDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('{') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('{'), tok) + } + + vals := map[string]tftypes.Value{} + for dec.More() { + p.WithElementKeyValue(tftypes.NewValue(tftypes.String, tftypes.UnknownValue)) + tok, err := dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + key, ok := tok.(string) + if !ok { + return tftypes.Value{}, p.NewErrorf("object attribute key was %T, not string", tok) + } + attrType, ok := attrTypes[key] + if !ok { + return tftypes.Value{}, p.NewErrorf("unsupported attribute %q", key) + } + p.WithoutLastStep() + p.WithAttributeName(key) + + var rawVal json.RawMessage + err = dec.Decode(&rawVal) + if err != nil { + return tftypes.Value{}, p.NewErrorf("error decoding value: %w", err) + } + val, err := jsonUnmarshal(rawVal, attrType, p) + if err != nil { + return tftypes.Value{}, err + } + vals[key] = val + p.WithoutLastStep() + } + + tok, err = dec.Token() + if err != nil { + return tftypes.Value{}, p.NewErrorf("error reading token: %w", err) + } + if tok != json.Delim('}') { + return tftypes.Value{}, p.NewErrorf("invalid JSON, expected %q, got %q", json.Delim('}'), tok) + } + + // make sure we have a value for every attribute + for k, typ := range attrTypes { + if _, ok := vals[k]; !ok { + vals[k] = tftypes.NewValue(typ, nil) + } + } + + return tftypes.NewValue(tftypes.Object{ + AttributeTypes: attrTypes, + }, vals), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value_msgpack.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value_msgpack.go new file mode 100644 index 00000000..45b78d79 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value_msgpack.go @@ -0,0 +1,327 @@ +package tfprotov5 + +import ( + "math/big" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" +) + +func msgpackUnmarshal(dec *msgpack.Decoder, typ tftypes.Type, path tftypes.AttributePath) (tftypes.Value, error) { + peek, err 
:= dec.PeekCode() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error peeking next byte: %w", err) + } + if msgpackCodes.IsExt(peek) { + // as with go-cty, assume all extensions are unknown values + err := dec.Skip() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error skipping extension byte: %w", err) + } + return tftypes.NewValue(typ, tftypes.UnknownValue), nil + } + if typ.Is(tftypes.DynamicPseudoType) { + return msgpackUnmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + err := dec.Skip() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error skipping nil byte: %w", err) + } + return tftypes.NewValue(typ, nil), nil + } + + switch { + case typ.Is(tftypes.String): + rv, err := dec.DecodeString() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding string: %w", err) + } + return tftypes.NewValue(tftypes.String, rv), nil + case typ.Is(tftypes.Number): + peek, err := dec.PeekCode() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't peek number: %w", err) + } + if msgpackCodes.IsFixedNum(peek) { + rv, err := dec.DecodeInt64() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) + } + return tftypes.NewValue(tftypes.Number, big.NewFloat(float64(rv))), nil + } + switch peek { + case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: + rv, err := dec.DecodeInt64() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't decode number as int64: %w", err) + } + return tftypes.NewValue(tftypes.Number, big.NewFloat(float64(rv))), nil + case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: + rv, err := dec.DecodeUint64() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't decode number as uint64: %w", err) + } + return tftypes.NewValue(tftypes.Number, big.NewFloat(float64(rv))), nil + case msgpackCodes.Float, msgpackCodes.Double: + rv, err := dec.DecodeFloat64() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't decode number as float64: %w", err) + } + return tftypes.NewValue(tftypes.Number, big.NewFloat(float64(rv))), nil + default: + rv, err := dec.DecodeString() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't decode number as string: %w", err) + } + // according to + // https://github.com/hashicorp/go-cty/blob/85980079f637862fa8e43ddc82dd74315e2f4c85/cty/value_init.go#L49 + // Base 10, precision 512, and rounding to nearest even + // is the standard way to handle numbers arriving as + // strings. 
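+ // The string fallback matters because the msgpack encoding used by
+ // Terraform represents numbers that can't be stored losslessly as an
+ // integer or float64 (very large or high-precision values) in their
+ // decimal string form, which is parsed here using that convention.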
+ fv, _, err := big.ParseFloat(rv, 10, 512, big.ToNearestEven) + if err != nil { + return tftypes.Value{}, path.NewErrorf("error parsing %q as number: %w", rv, err) + } + return tftypes.NewValue(tftypes.Number, fv), nil + } + case typ.Is(tftypes.Bool): + rv, err := dec.DecodeBool() + if err != nil { + return tftypes.Value{}, path.NewErrorf("couldn't decode bool: %w", err) + } + return tftypes.NewValue(tftypes.Bool, rv), nil + case typ.Is(tftypes.List{}): + return msgpackUnmarshalList(dec, typ.(tftypes.List).ElementType, path) + case typ.Is(tftypes.Set{}): + return msgpackUnmarshalSet(dec, typ.(tftypes.Set).ElementType, path) + case typ.Is(tftypes.Map{}): + return msgpackUnmarshalMap(dec, typ.(tftypes.Map).AttributeType, path) + case typ.Is(tftypes.Tuple{}): + return msgpackUnmarshalTuple(dec, typ.(tftypes.Tuple).ElementTypes, path) + case typ.Is(tftypes.Object{}): + return msgpackUnmarshalObject(dec, typ.(tftypes.Object).AttributeTypes, path) + } + return tftypes.Value{}, path.NewErrorf("unsupported type %s", typ.String()) +} + +func msgpackUnmarshalList(dec *msgpack.Decoder, typ tftypes.Type, path tftypes.AttributePath) (tftypes.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding list length: %w", err) + } + + switch { + case length < 0: + return tftypes.NewValue(tftypes.List{ + ElementType: typ, + }, nil), nil + case length == 0: + return tftypes.NewValue(tftypes.List{ + ElementType: typ, + }, []tftypes.Value{}), nil + } + + vals := make([]tftypes.Value, 0, length) + for i := 0; i < length; i++ { + path.WithElementKeyInt(int64(i)) + val, err := msgpackUnmarshal(dec, typ, path) + if err != nil { + return tftypes.Value{}, err + } + vals = append(vals, val) + path.WithoutLastStep() + } + + elTyp := typ + if elTyp == tftypes.DynamicPseudoType { + elTyp, err = tftypes.TypeFromElements(vals) + if err != nil { + return tftypes.Value{}, err + } + } + + return tftypes.NewValue(tftypes.List{ + ElementType: elTyp, + }, vals), nil +} + +func msgpackUnmarshalSet(dec *msgpack.Decoder, typ tftypes.Type, path tftypes.AttributePath) (tftypes.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding set length: %w", err) + } + + switch { + case length < 0: + return tftypes.NewValue(tftypes.Set{ + ElementType: typ, + }, nil), nil + case length == 0: + return tftypes.NewValue(tftypes.Set{ + ElementType: typ, + }, []tftypes.Value{}), nil + } + + vals := make([]tftypes.Value, 0, length) + for i := 0; i < length; i++ { + path.WithElementKeyInt(int64(i)) + val, err := msgpackUnmarshal(dec, typ, path) + if err != nil { + return tftypes.Value{}, err + } + vals = append(vals, val) + path.WithoutLastStep() + } + + elTyp, err := tftypes.TypeFromElements(vals) + if err != nil { + return tftypes.Value{}, err + } + + return tftypes.NewValue(tftypes.Set{ + ElementType: elTyp, + }, vals), nil +} + +func msgpackUnmarshalMap(dec *msgpack.Decoder, typ tftypes.Type, path tftypes.AttributePath) (tftypes.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding map length: %w", err) + } + + switch { + case length < 0: + return tftypes.NewValue(tftypes.Map{ + AttributeType: typ, + }, nil), nil + case length == 0: + return tftypes.NewValue(tftypes.Map{ + AttributeType: typ, + }, map[string]tftypes.Value{}), nil + } + + vals := make(map[string]tftypes.Value, length) + for i := 0; i < length; i++ { + key, err := 
dec.DecodeString() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding map key: %w", err) + } + path.WithElementKeyString(key) + val, err := msgpackUnmarshal(dec, typ, path) + if err != nil { + return tftypes.Value{}, err + } + vals[key] = val + path.WithoutLastStep() + } + return tftypes.NewValue(tftypes.Map{ + AttributeType: typ, + }, vals), nil +} + +func msgpackUnmarshalTuple(dec *msgpack.Decoder, types []tftypes.Type, path tftypes.AttributePath) (tftypes.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding tuple length: %w", err) + } + + switch { + case length < 0: + return tftypes.NewValue(tftypes.Tuple{ + ElementTypes: types, + }, nil), nil + case length == 0: + return tftypes.NewValue(tftypes.Tuple{ + // no elements means no types + ElementTypes: nil, + }, []tftypes.Value{}), nil + case length != len(types): + return tftypes.Value{}, path.NewErrorf("error decoding tuple; expected %d items, got %d", len(types), length) + } + + vals := make([]tftypes.Value, 0, length) + for i := 0; i < length; i++ { + path.WithElementKeyInt(int64(i)) + typ := types[i] + val, err := msgpackUnmarshal(dec, typ, path) + if err != nil { + return tftypes.Value{}, err + } + vals = append(vals, val) + path.WithoutLastStep() + } + + return tftypes.NewValue(tftypes.Tuple{ + ElementTypes: types, + }, vals), nil +} + +func msgpackUnmarshalObject(dec *msgpack.Decoder, types map[string]tftypes.Type, path tftypes.AttributePath) (tftypes.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding object length: %w", err) + } + + switch { + case length < 0: + return tftypes.NewValue(tftypes.Object{ + AttributeTypes: types, + }, nil), nil + case length == 0: + return tftypes.NewValue(tftypes.Object{ + // no attributes means no types + AttributeTypes: nil, + }, map[string]tftypes.Value{}), nil + case length != len(types): + return tftypes.Value{}, path.NewErrorf("error decoding object; expected %d attributes, got %d", len(types), length) + } + + vals := make(map[string]tftypes.Value, length) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding object key: %w", err) + } + typ, exists := types[key] + if !exists { + return tftypes.Value{}, path.NewErrorf("unknown attribute %q", key) + } + path.WithAttributeName(key) + val, err := msgpackUnmarshal(dec, typ, path) + if err != nil { + return tftypes.Value{}, err + } + vals[key] = val + path.WithoutLastStep() + } + + return tftypes.NewValue(tftypes.Object{ + AttributeTypes: types, + }, vals), nil +} + +func msgpackUnmarshalDynamic(dec *msgpack.Decoder, path tftypes.AttributePath) (tftypes.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error checking length of DynamicPseudoType value: %w", err) + } + + switch { + case length == -1: + return tftypes.NewValue(tftypes.DynamicPseudoType, nil), nil + case length != 2: + return tftypes.Value{}, path.NewErrorf("expected %d elements in DynamicPseudoType array, got %d", 2, length) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return tftypes.Value{}, path.NewErrorf("error decoding bytes: %w", err) + } + typ, err := tftypes.ParseJSONType(typeJSON) //nolint:staticcheck + if err != nil { + return tftypes.Value{}, path.NewErrorf("error parsing type information: %w", err) + } + return msgpackUnmarshal(dec, typ, 
path) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/attribute_path.go new file mode 100644 index 00000000..6630160f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/attribute_path.go @@ -0,0 +1,65 @@ +package fromproto + +import ( + "errors" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +var ErrUnknownAttributePathStepType = errors.New("unknown type of AttributePath_Step") + +func AttributePath(in *tfplugin5.AttributePath) (*tftypes.AttributePath, error) { + steps, err := AttributePathSteps(in.Steps) + if err != nil { + return nil, err + } + return &tftypes.AttributePath{ + Steps: steps, + }, nil +} + +func AttributePaths(in []*tfplugin5.AttributePath) ([]*tftypes.AttributePath, error) { + resp := make([]*tftypes.AttributePath, 0, len(in)) + for _, a := range in { + if a == nil { + resp = append(resp, nil) + continue + } + attr, err := AttributePath(a) + if err != nil { + return resp, err + } + resp = append(resp, attr) + } + return resp, nil +} + +func AttributePathStep(step *tfplugin5.AttributePath_Step) (tftypes.AttributePathStep, error) { + selector := step.GetSelector() + if v, ok := selector.(*tfplugin5.AttributePath_Step_AttributeName); ok { + return tftypes.AttributeName(v.AttributeName), nil + } + if v, ok := selector.(*tfplugin5.AttributePath_Step_ElementKeyString); ok { + return tftypes.ElementKeyString(v.ElementKeyString), nil + } + if v, ok := selector.(*tfplugin5.AttributePath_Step_ElementKeyInt); ok { + return tftypes.ElementKeyInt(v.ElementKeyInt), nil + } + return nil, ErrUnknownAttributePathStepType +} + +func AttributePathSteps(in []*tfplugin5.AttributePath_Step) ([]tftypes.AttributePathStep, error) { + resp := make([]tftypes.AttributePathStep, 0, len(in)) + for _, step := range in { + if step == nil { + continue + } + s, err := AttributePathStep(step) + if err != nil { + return resp, err + } + resp = append(resp, s) + } + return resp, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go new file mode 100644 index 00000000..bb1ad542 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go @@ -0,0 +1,53 @@ +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ValidateDataSourceConfigRequest(in *tfplugin5.ValidateDataSourceConfig_Request) (*tfprotov5.ValidateDataSourceConfigRequest, error) { + resp := &tfprotov5.ValidateDataSourceConfigRequest{ + TypeName: in.TypeName, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func ValidateDataSourceConfigResponse(in *tfplugin5.ValidateDataSourceConfig_Response) (*tfprotov5.ValidateDataSourceConfigResponse, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfprotov5.ValidateDataSourceConfigResponse{ + Diagnostics: diags, + }, nil +} + +func ReadDataSourceRequest(in *tfplugin5.ReadDataSource_Request) (*tfprotov5.ReadDataSourceRequest, error) { + resp := &tfprotov5.ReadDataSourceRequest{ + TypeName: in.TypeName, + } + if in.Config 
!= nil { + resp.Config = DynamicValue(in.Config) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func ReadDataSourceResponse(in *tfplugin5.ReadDataSource_Response) (*tfprotov5.ReadDataSourceResponse, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + resp := &tfprotov5.ReadDataSourceResponse{ + Diagnostics: diags, + } + if in.State != nil { + resp.State = DynamicValue(in.State) + } + return resp, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/diagnostic.go new file mode 100644 index 00000000..e0feaea5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/diagnostic.go @@ -0,0 +1,42 @@ +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func Diagnostic(in *tfplugin5.Diagnostic) (*tfprotov5.Diagnostic, error) { + diag := &tfprotov5.Diagnostic{ + Severity: DiagnosticSeverity(in.Severity), + Summary: in.Summary, + Detail: in.Detail, + } + if in.Attribute != nil { + attr, err := AttributePath(in.Attribute) + if err != nil { + return diag, err + } + diag.Attribute = attr + } + return diag, nil +} + +func DiagnosticSeverity(in tfplugin5.Diagnostic_Severity) tfprotov5.DiagnosticSeverity { + return tfprotov5.DiagnosticSeverity(in) +} + +func Diagnostics(in []*tfplugin5.Diagnostic) ([]*tfprotov5.Diagnostic, error) { + diagnostics := make([]*tfprotov5.Diagnostic, 0, len(in)) + for _, diag := range in { + if diag == nil { + diagnostics = append(diagnostics, nil) + continue + } + d, err := Diagnostic(diag) + if err != nil { + return diagnostics, err + } + diagnostics = append(diagnostics, d) + } + return diagnostics, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go new file mode 100644 index 00000000..8184bebf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go @@ -0,0 +1,109 @@ +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetProviderSchemaRequest(in *tfplugin5.GetProviderSchema_Request) (*tfprotov5.GetProviderSchemaRequest, error) { + return &tfprotov5.GetProviderSchemaRequest{}, nil +} + +func GetProviderSchemaResponse(in *tfplugin5.GetProviderSchema_Response) (*tfprotov5.GetProviderSchemaResponse, error) { + var resp tfprotov5.GetProviderSchemaResponse + if in.Provider != nil { + schema, err := Schema(in.Provider) + if err != nil { + return &resp, err + } + resp.Provider = schema + } + if in.ProviderMeta != nil { + schema, err := Schema(in.ProviderMeta) + if err != nil { + return &resp, err + } + resp.ProviderMeta = schema + } + resp.ResourceSchemas = make(map[string]*tfprotov5.Schema, len(in.ResourceSchemas)) + for k, v := range in.ResourceSchemas { + if v == nil { + resp.ResourceSchemas[k] = nil + continue + } + schema, err := Schema(v) + if err != nil { + return &resp, err + } + resp.ResourceSchemas[k] = schema + } + resp.DataSourceSchemas = make(map[string]*tfprotov5.Schema, len(in.DataSourceSchemas)) + for k, v := range in.DataSourceSchemas { + if v == nil { + 
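+ // a nil schema is kept as an explicit nil entry rather than being
+ // dropped, mirroring the ResourceSchemas loop above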
resp.DataSourceSchemas[k] = nil + continue + } + schema, err := Schema(v) + if err != nil { + return &resp, err + } + resp.DataSourceSchemas[k] = schema + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return &resp, err + } + resp.Diagnostics = diags + return &resp, nil +} + +func PrepareProviderConfigRequest(in *tfplugin5.PrepareProviderConfig_Request) (*tfprotov5.PrepareProviderConfigRequest, error) { + var resp tfprotov5.PrepareProviderConfigRequest + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return &resp, nil +} + +func PrepareProviderConfigResponse(in *tfplugin5.PrepareProviderConfig_Response) (*tfprotov5.PrepareProviderConfigResponse, error) { + var resp tfprotov5.PrepareProviderConfigResponse + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + resp.Diagnostics = diags + if in.PreparedConfig != nil { + resp.PreparedConfig = DynamicValue(in.PreparedConfig) + } + return &resp, nil +} + +func ConfigureProviderRequest(in *tfplugin5.Configure_Request) (*tfprotov5.ConfigureProviderRequest, error) { + resp := &tfprotov5.ConfigureProviderRequest{ + TerraformVersion: in.TerraformVersion, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func ConfigureProviderResponse(in *tfplugin5.Configure_Response) (*tfprotov5.ConfigureProviderResponse, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfprotov5.ConfigureProviderResponse{ + Diagnostics: diags, + }, nil +} + +func StopProviderRequest(in *tfplugin5.Stop_Request) (*tfprotov5.StopProviderRequest, error) { + return &tfprotov5.StopProviderRequest{}, nil +} + +func StopProviderResponse(in *tfplugin5.Stop_Response) (*tfprotov5.StopProviderResponse, error) { + return &tfprotov5.StopProviderResponse{ + Error: in.Error, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go new file mode 100644 index 00000000..9b7d505b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go @@ -0,0 +1,208 @@ +package fromproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ValidateResourceTypeConfigRequest(in *tfplugin5.ValidateResourceTypeConfig_Request) (*tfprotov5.ValidateResourceTypeConfigRequest, error) { + resp := &tfprotov5.ValidateResourceTypeConfigRequest{ + TypeName: in.TypeName, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func ValidateResourceTypeConfigResponse(in *tfplugin5.ValidateResourceTypeConfig_Response) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfprotov5.ValidateResourceTypeConfigResponse{ + Diagnostics: diags, + }, nil +} + +func UpgradeResourceStateRequest(in *tfplugin5.UpgradeResourceState_Request) (*tfprotov5.UpgradeResourceStateRequest, error) { + resp := &tfprotov5.UpgradeResourceStateRequest{ + TypeName: in.TypeName, + Version: in.Version, + } + if in.RawState != nil { + resp.RawState = RawState(in.RawState) + } + return resp, nil +} + +func UpgradeResourceStateResponse(in *tfplugin5.UpgradeResourceState_Response) (*tfprotov5.UpgradeResourceStateResponse, error) { + diags, err := 
Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + resp := &tfprotov5.UpgradeResourceStateResponse{ + Diagnostics: diags, + } + if in.UpgradedState != nil { + resp.UpgradedState = DynamicValue(in.UpgradedState) + } + return resp, nil +} + +func ReadResourceRequest(in *tfplugin5.ReadResource_Request) (*tfprotov5.ReadResourceRequest, error) { + resp := &tfprotov5.ReadResourceRequest{ + TypeName: in.TypeName, + Private: in.Private, + } + if in.CurrentState != nil { + resp.CurrentState = DynamicValue(in.CurrentState) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func ReadResourceResponse(in *tfplugin5.ReadResource_Response) (*tfprotov5.ReadResourceResponse, error) { + resp := &tfprotov5.ReadResourceResponse{ + Private: in.Private, + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return resp, err + } + resp.Diagnostics = diags + if in.NewState != nil { + resp.NewState = DynamicValue(in.NewState) + } + return resp, nil +} + +func PlanResourceChangeRequest(in *tfplugin5.PlanResourceChange_Request) (*tfprotov5.PlanResourceChangeRequest, error) { + resp := &tfprotov5.PlanResourceChangeRequest{ + TypeName: in.TypeName, + PriorPrivate: in.PriorPrivate, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + if in.PriorState != nil { + resp.PriorState = DynamicValue(in.PriorState) + } + if in.ProposedNewState != nil { + resp.ProposedNewState = DynamicValue(in.ProposedNewState) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func PlanResourceChangeResponse(in *tfplugin5.PlanResourceChange_Response) (*tfprotov5.PlanResourceChangeResponse, error) { + resp := &tfprotov5.PlanResourceChangeResponse{ + PlannedPrivate: in.PlannedPrivate, + UnsafeToUseLegacyTypeSystem: in.LegacyTypeSystem, + } + attributePaths, err := AttributePaths(in.RequiresReplace) + if err != nil { + return resp, err + } + resp.RequiresReplace = attributePaths + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return resp, err + } + resp.Diagnostics = diags + if in.PlannedState != nil { + resp.PlannedState = DynamicValue(in.PlannedState) + } + return resp, nil +} + +func ApplyResourceChangeRequest(in *tfplugin5.ApplyResourceChange_Request) (*tfprotov5.ApplyResourceChangeRequest, error) { + resp := &tfprotov5.ApplyResourceChangeRequest{ + TypeName: in.TypeName, + PlannedPrivate: in.PlannedPrivate, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + if in.PriorState != nil { + resp.PriorState = DynamicValue(in.PriorState) + } + if in.PlannedState != nil { + resp.PlannedState = DynamicValue(in.PlannedState) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func ApplyResourceChangeResponse(in *tfplugin5.ApplyResourceChange_Response) (*tfprotov5.ApplyResourceChangeResponse, error) { + resp := &tfprotov5.ApplyResourceChangeResponse{ + Private: in.Private, + UnsafeToUseLegacyTypeSystem: in.LegacyTypeSystem, + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return resp, err + } + resp.Diagnostics = diags + if in.NewState != nil { + resp.NewState = DynamicValue(in.NewState) + } + return resp, nil +} + +func ImportResourceStateRequest(in *tfplugin5.ImportResourceState_Request) (*tfprotov5.ImportResourceStateRequest, error) { + return &tfprotov5.ImportResourceStateRequest{ + TypeName: in.TypeName, + ID: in.Id, + }, nil +} + +func 
ImportResourceStateResponse(in *tfplugin5.ImportResourceState_Response) (*tfprotov5.ImportResourceStateResponse, error) { + imported, err := ImportedResources(in.ImportedResources) + if err != nil { + return nil, err + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfprotov5.ImportResourceStateResponse{ + ImportedResources: imported, + Diagnostics: diags, + }, nil +} + +func ImportedResource(in *tfplugin5.ImportResourceState_ImportedResource) (*tfprotov5.ImportedResource, error) { + resp := &tfprotov5.ImportedResource{ + TypeName: in.TypeName, + Private: in.Private, + } + if in.State != nil { + resp.State = DynamicValue(in.State) + } + return resp, nil +} + +func ImportedResources(in []*tfplugin5.ImportResourceState_ImportedResource) ([]*tfprotov5.ImportedResource, error) { + resp := make([]*tfprotov5.ImportedResource, 0, len(in)) + for pos, i := range in { + if i == nil { + resp = append(resp, nil) + continue + } + r, err := ImportedResource(i) + if err != nil { + return resp, fmt.Errorf("Error converting imported resource %d/%d: %w", pos+1, len(in), err) + } + resp = append(resp, r) + } + return resp, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/schema.go new file mode 100644 index 00000000..beff9ca3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/schema.go @@ -0,0 +1,114 @@ +package fromproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +func Schema(in *tfplugin5.Schema) (*tfprotov5.Schema, error) { + var resp tfprotov5.Schema + resp.Version = in.Version + if in.Block != nil { + block, err := SchemaBlock(in.Block) + if err != nil { + return &resp, err + } + resp.Block = block + } + return &resp, nil +} + +func SchemaBlock(in *tfplugin5.Schema_Block) (*tfprotov5.SchemaBlock, error) { + resp := &tfprotov5.SchemaBlock{ + Version: in.Version, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Deprecated: in.Deprecated, + } + attrs, err := SchemaAttributes(in.Attributes) + if err != nil { + return resp, err + } + resp.Attributes = attrs + blocks, err := SchemaNestedBlocks(in.BlockTypes) + if err != nil { + return resp, err + } + resp.BlockTypes = blocks + return resp, nil +} + +func SchemaAttribute(in *tfplugin5.Schema_Attribute) (*tfprotov5.SchemaAttribute, error) { + resp := &tfprotov5.SchemaAttribute{ + Name: in.Name, + Description: in.Description, + Required: in.Required, + Optional: in.Optional, + Computed: in.Computed, + Sensitive: in.Sensitive, + DescriptionKind: StringKind(in.DescriptionKind), + Deprecated: in.Deprecated, + } + typ, err := tftypes.ParseJSONType(in.Type) //nolint:staticcheck + if err != nil { + return resp, err + } + resp.Type = typ + return resp, nil +} + +func SchemaAttributes(in []*tfplugin5.Schema_Attribute) ([]*tfprotov5.SchemaAttribute, error) { + resp := make([]*tfprotov5.SchemaAttribute, 0, len(in)) + for pos, a := range in { + if a == nil { + resp = append(resp, nil) + continue + } + attr, err := SchemaAttribute(a) + if err != nil { + return resp, fmt.Errorf("error converting schema attribute %d: %w", pos, err) + } + resp = append(resp, attr) + } + return resp, nil +} + +func SchemaNestedBlock(in 
*tfplugin5.Schema_NestedBlock) (*tfprotov5.SchemaNestedBlock, error) { + resp := &tfprotov5.SchemaNestedBlock{ + TypeName: in.TypeName, + Nesting: SchemaNestedBlockNestingMode(in.Nesting), + MinItems: in.MinItems, + MaxItems: in.MaxItems, + } + if in.Block != nil { + block, err := SchemaBlock(in.Block) + if err != nil { + return resp, err + } + resp.Block = block + } + return resp, nil +} + +func SchemaNestedBlocks(in []*tfplugin5.Schema_NestedBlock) ([]*tfprotov5.SchemaNestedBlock, error) { + resp := make([]*tfprotov5.SchemaNestedBlock, 0, len(in)) + for pos, b := range in { + if b == nil { + resp = append(resp, nil) + continue + } + block, err := SchemaNestedBlock(b) + if err != nil { + return resp, fmt.Errorf("error converting nested block %d: %w", pos, err) + } + resp = append(resp, block) + } + return resp, nil +} + +func SchemaNestedBlockNestingMode(in tfplugin5.Schema_NestedBlock_NestingMode) tfprotov5.SchemaNestedBlockNestingMode { + return tfprotov5.SchemaNestedBlockNestingMode(in) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/state.go new file mode 100644 index 00000000..ad89004d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/state.go @@ -0,0 +1,13 @@ +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func RawState(in *tfplugin5.RawState) *tfprotov5.RawState { + return &tfprotov5.RawState{ + JSON: in.Json, + Flatmap: in.Flatmap, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/string_kind.go new file mode 100644 index 00000000..c15d2df4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/string_kind.go @@ -0,0 +1,10 @@ +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func StringKind(in tfplugin5.StringKind) tfprotov5.StringKind { + return tfprotov5.StringKind(in) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/types.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/types.go new file mode 100644 index 00000000..17f31f85 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/types.go @@ -0,0 +1,13 @@ +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func DynamicValue(in *tfplugin5.DynamicValue) *tfprotov5.DynamicValue { + return &tfprotov5.DynamicValue{ + MsgPack: in.Msgpack, + JSON: in.Json, + } +} diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh similarity index 76% rename from vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh rename to vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh index de1d693c..ac9ec74b 100644 --- a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh +++ 
b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/generate.sh @@ -13,4 +13,4 @@ DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" cd "$DIR" -protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ +protoc --proto_path=. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative tfplugin5.proto diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go new file mode 100644 index 00000000..793eed7a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,4608 @@ +// Terraform Plugin RPC protocol version 5.2 +// +// This file defines version 5.2 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaing backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the master +// branch or any other development branch. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.13.0 +// source: tfplugin5.proto + +package tfplugin5 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type StringKind int32 + +const ( + StringKind_PLAIN StringKind = 0 + StringKind_MARKDOWN StringKind = 1 +) + +// Enum value maps for StringKind. +var ( + StringKind_name = map[int32]string{ + 0: "PLAIN", + 1: "MARKDOWN", + } + StringKind_value = map[string]int32{ + "PLAIN": 0, + "MARKDOWN": 1, + } +) + +func (x StringKind) Enum() *StringKind { + p := new(StringKind) + *p = x + return p +} + +func (x StringKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StringKind) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[0].Descriptor() +} + +func (StringKind) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[0] +} + +func (x StringKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StringKind.Descriptor instead. 
+func (StringKind) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{0} +} + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +// Enum value maps for Diagnostic_Severity. +var ( + Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", + } + Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, + } +) + +func (x Diagnostic_Severity) Enum() *Diagnostic_Severity { + p := new(Diagnostic_Severity) + *p = x + return p +} + +func (x Diagnostic_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Diagnostic_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[1].Descriptor() +} + +func (Diagnostic_Severity) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[1] +} + +func (x Diagnostic_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Diagnostic_Severity.Descriptor instead. +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +// Enum value maps for Schema_NestedBlock_NestingMode. +var ( + Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", + } + Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, + } +) + +func (x Schema_NestedBlock_NestingMode) Enum() *Schema_NestedBlock_NestingMode { + p := new(Schema_NestedBlock_NestingMode) + *p = x + return p +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_NestedBlock_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[2].Descriptor() +} + +func (Schema_NestedBlock_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[2] +} + +func (x Schema_NestedBlock_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_NestedBlock_NestingMode.Descriptor instead. +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{5, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
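+// Only one of the two fields is expected to be set for any given value. A
+// consuming provider server generally converts this message into a
+// tfprotov5.DynamicValue (see internal/fromproto/types.go) and calls its
+// Unmarshal method with the type declared in the corresponding schema; that
+// method decodes the JSON field when it is set and otherwise falls back to
+// msgpack. An illustrative call (the object type here is a made-up schema
+// type, not one defined by this package):
+//
+//	val, err := dv.Unmarshal(tftypes.Object{
+//		AttributeTypes: map[string]tftypes.Type{"id": tftypes.String},
+//	})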
+type DynamicValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` +} + +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. +func (*DynamicValue) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{0} +} + +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack + } + return nil +} + +func (x *DynamicValue) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +type Diagnostic struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` +} + +func (x *Diagnostic) Reset() { + *x = Diagnostic{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostic) ProtoMessage() {} + +func (x *Diagnostic) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostic.ProtoReflect.Descriptor instead. 
+func (*Diagnostic) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{1} +} + +func (x *Diagnostic) GetSeverity() Diagnostic_Severity { + if x != nil { + return x.Severity + } + return Diagnostic_INVALID +} + +func (x *Diagnostic) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Diagnostic) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +func (x *Diagnostic) GetAttribute() *AttributePath { + if x != nil { + return x.Attribute + } + return nil +} + +type AttributePath struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` +} + +func (x *AttributePath) Reset() { + *x = AttributePath{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath) ProtoMessage() {} + +func (x *AttributePath) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath.ProtoReflect.Descriptor instead. +func (*AttributePath) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{2} +} + +func (x *AttributePath) GetSteps() []*AttributePath_Step { + if x != nil { + return x.Steps + } + return nil +} + +type Stop struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Stop) Reset() { + *x = Stop{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop) ProtoMessage() {} + +func (x *Stop) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop.ProtoReflect.Descriptor instead. +func (*Stop) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3} +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
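+// The Json field is what current Terraform versions send; Flatmap only
+// appears for state originally written by very old (pre-0.12) Terraform
+// releases. Providers handling UpgradeResourceState generally decode the
+// Json bytes themselves, since the stored state may not match the current
+// schema.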
+type RawState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RawState) Reset() { + *x = RawState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawState) ProtoMessage() {} + +func (x *RawState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawState.ProtoReflect.Descriptor instead. +func (*RawState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4} +} + +func (x *RawState) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +func (x *RawState) GetFlatmap() map[string]string { + if x != nil { + return x.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
+func (*Schema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{5} +} + +func (x *Schema) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +type GetProviderSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema) Reset() { + *x = GetProviderSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema) ProtoMessage() {} + +func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6} +} + +type PrepareProviderConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PrepareProviderConfig) Reset() { + *x = PrepareProviderConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig) ProtoMessage() {} + +func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig.ProtoReflect.Descriptor instead. +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{7} +} + +type UpgradeResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpgradeResourceState) Reset() { + *x = UpgradeResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState) ProtoMessage() {} + +func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8} +} + +type ValidateResourceTypeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateResourceTypeConfig) Reset() { + *x = ValidateResourceTypeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig.ProtoReflect.Descriptor instead. +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9} +} + +type ValidateDataSourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateDataSourceConfig) Reset() { + *x = ValidateDataSourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig) ProtoMessage() {} + +func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig.ProtoReflect.Descriptor instead. +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10} +} + +type Configure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Configure) Reset() { + *x = Configure{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure) ProtoMessage() {} + +func (x *Configure) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure.ProtoReflect.Descriptor instead. 
+func (*Configure) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11} +} + +type ReadResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadResource) Reset() { + *x = ReadResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource) ProtoMessage() {} + +func (x *ReadResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. +func (*ReadResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12} +} + +type PlanResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PlanResourceChange) Reset() { + *x = PlanResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange) ProtoMessage() {} + +func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13} +} + +type ApplyResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApplyResourceChange) Reset() { + *x = ApplyResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange) ProtoMessage() {} + +func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14} +} + +type ImportResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ImportResourceState) Reset() { + *x = ImportResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState) ProtoMessage() {} + +func (x *ImportResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15} +} + +type ReadDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadDataSource) Reset() { + *x = ReadDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource) ProtoMessage() {} + +func (x *ReadDataSource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16} +} + +type GetProvisionerSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProvisionerSchema) Reset() { + *x = GetProvisionerSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema) ProtoMessage() {} + +func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema.ProtoReflect.Descriptor instead. 
+func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17} +} + +type ValidateProvisionerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateProvisionerConfig) Reset() { + *x = ValidateProvisionerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig) ProtoMessage() {} + +func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig.ProtoReflect.Descriptor instead. +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18} +} + +type ProvisionResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ProvisionResource) Reset() { + *x = ProvisionResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource) ProtoMessage() {} + +func (x *ProvisionResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource.ProtoReflect.Descriptor instead. +func (*ProvisionResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19} +} + +type AttributePath_Step struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` +} + +func (x *AttributePath_Step) Reset() { + *x = AttributePath_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath_Step) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath_Step) ProtoMessage() {} + +func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath_Step.ProtoReflect.Descriptor instead. 
+func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{2, 0} +} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *AttributePath_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyString() string { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +type Stop_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Stop_Request) Reset() { + *x = Stop_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop_Request) ProtoMessage() {} + +func (x *Stop_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop_Request.ProtoReflect.Descriptor instead. 
+func (*Stop_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3, 0} +} + +type Stop_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *Stop_Response) Reset() { + *x = Stop_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop_Response) ProtoMessage() {} + +func (x *Stop_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop_Response.ProtoReflect.Descriptor instead. +func (*Stop_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Stop_Response) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type Schema_Block struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Block) Reset() { + *x = Schema_Block{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Block) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Block) ProtoMessage() {} + +func (x *Schema_Block) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Block.ProtoReflect.Descriptor instead. 
+func (*Schema_Block) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *Schema_Block) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema_Block) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if x != nil { + return x.BlockTypes + } + return nil +} + +func (x *Schema_Block) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Block) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Block) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Attribute) Reset() { + *x = Schema_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Attribute) ProtoMessage() {} + +func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Attribute.ProtoReflect.Descriptor instead. 
+func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{5, 1} +} + +func (x *Schema_Attribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Schema_Attribute) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema_Attribute) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Attribute) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Schema_Attribute) GetOptional() bool { + if x != nil { + return x.Optional + } + return false +} + +func (x *Schema_Attribute) GetComputed() bool { + if x != nil { + return x.Computed + } + return false +} + +func (x *Schema_Attribute) GetSensitive() bool { + if x != nil { + return x.Sensitive + } + return false +} + +func (x *Schema_Attribute) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Attribute) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_NestedBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_NestedBlock) Reset() { + *x = Schema_NestedBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_NestedBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_NestedBlock) ProtoMessage() {} + +func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_NestedBlock.ProtoReflect.Descriptor instead. 
+func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{5, 2}
+}
+
+func (x *Schema_NestedBlock) GetTypeName() string {
+	if x != nil {
+		return x.TypeName
+	}
+	return ""
+}
+
+func (x *Schema_NestedBlock) GetBlock() *Schema_Block {
+	if x != nil {
+		return x.Block
+	}
+	return nil
+}
+
+func (x *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode {
+	if x != nil {
+		return x.Nesting
+	}
+	return Schema_NestedBlock_INVALID
+}
+
+func (x *Schema_NestedBlock) GetMinItems() int64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *Schema_NestedBlock) GetMaxItems() int64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+type GetProviderSchema_Request struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetProviderSchema_Request) Reset() {
+	*x = GetProviderSchema_Request{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[27]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GetProviderSchema_Request) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetProviderSchema_Request) ProtoMessage() {}
+
+func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[27]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead.
+func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{6, 0}
+}
+
+type GetProviderSchema_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Provider          *Schema            `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"`
+	ResourceSchemas   map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Diagnostics       []*Diagnostic      `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+	ProviderMeta      *Schema            `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
+}
+
+func (x *GetProviderSchema_Response) Reset() {
+	*x = GetProviderSchema_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[28]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GetProviderSchema_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetProviderSchema_Response) ProtoMessage() {}
+
+func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[28]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead.
+func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{6, 1}
+}
+
+func (x *GetProviderSchema_Response) GetProvider() *Schema {
+	if x != nil {
+		return x.Provider
+	}
+	return nil
+}
+
+func (x *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema {
+	if x != nil {
+		return x.ResourceSchemas
+	}
+	return nil
+}
+
+func (x *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema {
+	if x != nil {
+		return x.DataSourceSchemas
+	}
+	return nil
+}
+
+func (x *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic {
+	if x != nil {
+		return x.Diagnostics
+	}
+	return nil
+}
+
+func (x *GetProviderSchema_Response) GetProviderMeta() *Schema {
+	if x != nil {
+		return x.ProviderMeta
+	}
+	return nil
+}
+
+type PrepareProviderConfig_Request struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+}
+
+func (x *PrepareProviderConfig_Request) Reset() {
+	*x = PrepareProviderConfig_Request{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[31]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PrepareProviderConfig_Request) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrepareProviderConfig_Request) ProtoMessage() {}
+
+func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[31]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrepareProviderConfig_Request.ProtoReflect.Descriptor instead.
+func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
+	return file_tfplugin5_proto_rawDescGZIP(), []int{7, 0}
+}
+
+func (x *PrepareProviderConfig_Request) GetConfig() *DynamicValue {
+	if x != nil {
+		return x.Config
+	}
+	return nil
+}
+
+type PrepareProviderConfig_Response struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"`
+	Diagnostics    []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+}
+
+func (x *PrepareProviderConfig_Response) Reset() {
+	*x = PrepareProviderConfig_Response{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_tfplugin5_proto_msgTypes[32]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PrepareProviderConfig_Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrepareProviderConfig_Response) ProtoMessage() {}
+
+func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message {
+	mi := &file_tfplugin5_proto_msgTypes[32]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrepareProviderConfig_Response.ProtoReflect.Descriptor instead.
+func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if x != nil { + return x.PreparedConfig + } + return nil +} + +func (x *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type UpgradeResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` +} + +func (x *UpgradeResourceState_Request) Reset() { + *x = UpgradeResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Request) ProtoMessage() {} + +func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *UpgradeResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *UpgradeResourceState_Request) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *UpgradeResourceState_Request) GetRawState() *RawState { + if x != nil { + return x.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. + UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *UpgradeResourceState_Response) Reset() { + *x = UpgradeResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Response) ProtoMessage() {} + +func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if x != nil { + return x.UpgradedState + } + return nil +} + +func (x *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateResourceTypeConfig_Request) Reset() { + *x = ValidateResourceTypeConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *ValidateResourceTypeConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateResourceTypeConfig_Response) Reset() { + *x = ValidateResourceTypeConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig_Response.ProtoReflect.Descriptor instead. +func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateDataSourceConfig_Request) Reset() { + *x = ValidateDataSourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} + +func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *ValidateDataSourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateDataSourceConfig_Response) Reset() { + *x = ValidateDataSourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} + +func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig_Response.ProtoReflect.Descriptor instead. +func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 1} +} + +func (x *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type Configure_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *Configure_Request) Reset() { + *x = Configure_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure_Request) ProtoMessage() {} + +func (x *Configure_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure_Request.ProtoReflect.Descriptor instead. 
+func (*Configure_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *Configure_Request) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion + } + return "" +} + +func (x *Configure_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type Configure_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *Configure_Response) Reset() { + *x = Configure_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure_Response) ProtoMessage() {} + +func (x *Configure_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure_Response.ProtoReflect.Descriptor instead. +func (*Configure_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} +} + +func (x *Configure_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ReadResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadResource_Request) Reset() { + *x = ReadResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Request) ProtoMessage() {} + +func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *ReadResource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadResource_Request) GetCurrentState() *DynamicValue { + if x != nil { + return x.CurrentState + } + return nil +} + +func (x *ReadResource_Request) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ReadResource_Response) Reset() { + *x = ReadResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Response) ProtoMessage() {} + +func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *ReadResource_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ReadResource_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type PlanResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *PlanResourceChange_Request) Reset() { + *x = PlanResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Request) ProtoMessage() {} + +func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *PlanResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if x != nil { + return x.ProposedNewState + } + return nil +} + +func (x *PlanResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *PlanResourceChange_Request) GetPriorPrivate() []byte { + if x != nil { + return x.PriorPrivate + } + return nil +} + +func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type PlanResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *PlanResourceChange_Response) Reset() { + *x = PlanResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Response) ProtoMessage() {} + +func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} +} + +func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if x != nil { + return x.RequiresReplace + } + return nil +} + +func (x *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ApplyResourceChange_Request) Reset() { + *x = ApplyResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Request) ProtoMessage() {} + +func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *ApplyResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ApplyResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *ApplyResourceChange_Response) Reset() { + *x = ApplyResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Response) ProtoMessage() {} + +func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} +} + +func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ApplyResourceChange_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ImportResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ImportResourceState_Request) Reset() { + *x = ImportResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Request) ProtoMessage() {} + +func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *ImportResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_Request) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ImportResourceState_ImportedResource) Reset() { + *x = ImportResourceState_ImportedResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_ImportedResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_ImportedResource) ProtoMessage() {} + +func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} +} + +func (x *ImportResourceState_ImportedResource) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ImportResourceState_ImportedResource) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type ImportResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ImportResourceState_Response) Reset() { + *x = ImportResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Response) ProtoMessage() {} + +func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 2} +} + +func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if x != nil { + return x.ImportedResources + } + return nil +} + +func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ReadDataSource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadDataSource_Request) Reset() { + *x = ReadDataSource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Request) ProtoMessage() {} + +func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *ReadDataSource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadDataSource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadDataSource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ReadDataSource_Response) Reset() { + *x = ReadDataSource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Response) ProtoMessage() {} + +func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. +func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} +} + +func (x *ReadDataSource_Response) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetProvisionerSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProvisionerSchema_Request) Reset() { + *x = GetProvisionerSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema_Request) ProtoMessage() {} + +func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema_Request.ProtoReflect.Descriptor instead. 
+func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0} +} + +type GetProvisionerSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetProvisionerSchema_Response) Reset() { + *x = GetProvisionerSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema_Response) ProtoMessage() {} + +func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema_Response.ProtoReflect.Descriptor instead. +func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} +} + +func (x *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if x != nil { + return x.Provisioner + } + return nil +} + +func (x *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateProvisionerConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateProvisionerConfig_Request) Reset() { + *x = ValidateProvisionerConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} + +func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateProvisionerConfig_Response) Reset() { + *x = ValidateProvisionerConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} + +func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig_Response.ProtoReflect.Descriptor instead. +func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ProvisionResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (x *ProvisionResource_Request) Reset() { + *x = ProvisionResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource_Request) ProtoMessage() {} + +func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource_Request.ProtoReflect.Descriptor instead. 
+func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *ProvisionResource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ProvisionResource_Request) GetConnection() *DynamicValue { + if x != nil { + return x.Connection + } + return nil +} + +type ProvisionResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ProvisionResource_Response) Reset() { + *x = ProvisionResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource_Response) ProtoMessage() {} + +func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource_Response.ProtoReflect.Descriptor instead. +func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} +} + +func (x *ProvisionResource_Response) GetOutput() string { + if x != nil { + return x.Output + } + return "" +} + +func (x *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +var File_tfplugin5_proto protoreflect.FileDescriptor + +var file_tfplugin5_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x22, 0x3c, 0x0a, 0x0c, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, + 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, + 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, + 0x22, 0xdc, 0x01, 0x0a, 0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, + 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, + 0x12, 0x27, 0x0a, 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x49, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, + 0x33, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, + 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, + 0x70, 0x1a, 0x3a, 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x07, + 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 
0x63, + 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x09, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, + 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, + 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, + 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0xd0, 0x04, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xaf, 0x04, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 
0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, + 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, + 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, + 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 
0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, + 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, + 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 
0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, + 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, + 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, + 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, + 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 
0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, + 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, + 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, + 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 
0x79, 0x70, 0x65, 0x53, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, + 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 
0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x37, 0x0a, 0x0b, 
0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, + 0x97, 0x09, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x75, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 
0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x12, 0x1c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, + 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, + 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 
0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, + 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, 0x0b, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, + 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, + 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_tfplugin5_proto_rawDescOnce sync.Once + file_tfplugin5_proto_rawDescData = file_tfplugin5_proto_rawDesc +) + +func file_tfplugin5_proto_rawDescGZIP() []byte { + file_tfplugin5_proto_rawDescOnce.Do(func() { + file_tfplugin5_proto_rawDescData = protoimpl.X.CompressGZIP(file_tfplugin5_proto_rawDescData) + }) + return file_tfplugin5_proto_rawDescData +} + +var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 58) +var file_tfplugin5_proto_goTypes = []interface{}{ + (StringKind)(0), // 0: tfplugin5.StringKind + (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity + (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin5.Schema.NestedBlock.NestingMode + (*DynamicValue)(nil), // 3: tfplugin5.DynamicValue + (*Diagnostic)(nil), // 4: tfplugin5.Diagnostic + (*AttributePath)(nil), // 5: tfplugin5.AttributePath + (*Stop)(nil), // 6: tfplugin5.Stop + (*RawState)(nil), // 7: tfplugin5.RawState + (*Schema)(nil), // 8: tfplugin5.Schema + (*GetProviderSchema)(nil), // 9: tfplugin5.GetProviderSchema + (*PrepareProviderConfig)(nil), // 10: tfplugin5.PrepareProviderConfig + (*UpgradeResourceState)(nil), // 11: tfplugin5.UpgradeResourceState + (*ValidateResourceTypeConfig)(nil), // 12: tfplugin5.ValidateResourceTypeConfig + (*ValidateDataSourceConfig)(nil), // 13: tfplugin5.ValidateDataSourceConfig + (*Configure)(nil), // 14: tfplugin5.Configure + (*ReadResource)(nil), // 15: tfplugin5.ReadResource + (*PlanResourceChange)(nil), // 16: tfplugin5.PlanResourceChange + (*ApplyResourceChange)(nil), // 17: tfplugin5.ApplyResourceChange + (*ImportResourceState)(nil), // 18: tfplugin5.ImportResourceState + (*ReadDataSource)(nil), // 19: tfplugin5.ReadDataSource + (*GetProvisionerSchema)(nil), // 20: tfplugin5.GetProvisionerSchema + (*ValidateProvisionerConfig)(nil), // 21: tfplugin5.ValidateProvisionerConfig + (*ProvisionResource)(nil), // 22: tfplugin5.ProvisionResource + (*AttributePath_Step)(nil), // 23: tfplugin5.AttributePath.Step + (*Stop_Request)(nil), // 24: tfplugin5.Stop.Request + (*Stop_Response)(nil), // 25: tfplugin5.Stop.Response + nil, // 26: tfplugin5.RawState.FlatmapEntry + (*Schema_Block)(nil), // 27: tfplugin5.Schema.Block + (*Schema_Attribute)(nil), // 28: tfplugin5.Schema.Attribute + (*Schema_NestedBlock)(nil), // 29: tfplugin5.Schema.NestedBlock + (*GetProviderSchema_Request)(nil), // 30: tfplugin5.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 31: tfplugin5.GetProviderSchema.Response + nil, // 32: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 33: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + (*PrepareProviderConfig_Request)(nil), // 34: tfplugin5.PrepareProviderConfig.Request + (*PrepareProviderConfig_Response)(nil), // 35: tfplugin5.PrepareProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 36: tfplugin5.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 37: tfplugin5.UpgradeResourceState.Response + (*ValidateResourceTypeConfig_Request)(nil), // 38: tfplugin5.ValidateResourceTypeConfig.Request + (*ValidateResourceTypeConfig_Response)(nil), // 39: tfplugin5.ValidateResourceTypeConfig.Response + (*ValidateDataSourceConfig_Request)(nil), // 40: tfplugin5.ValidateDataSourceConfig.Request + (*ValidateDataSourceConfig_Response)(nil), // 41: tfplugin5.ValidateDataSourceConfig.Response + (*Configure_Request)(nil), // 42: tfplugin5.Configure.Request + (*Configure_Response)(nil), // 
43: tfplugin5.Configure.Response + (*ReadResource_Request)(nil), // 44: tfplugin5.ReadResource.Request + (*ReadResource_Response)(nil), // 45: tfplugin5.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 46: tfplugin5.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 47: tfplugin5.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 48: tfplugin5.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 49: tfplugin5.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 50: tfplugin5.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 51: tfplugin5.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 52: tfplugin5.ImportResourceState.Response + (*ReadDataSource_Request)(nil), // 53: tfplugin5.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 54: tfplugin5.ReadDataSource.Response + (*GetProvisionerSchema_Request)(nil), // 55: tfplugin5.GetProvisionerSchema.Request + (*GetProvisionerSchema_Response)(nil), // 56: tfplugin5.GetProvisionerSchema.Response + (*ValidateProvisionerConfig_Request)(nil), // 57: tfplugin5.ValidateProvisionerConfig.Request + (*ValidateProvisionerConfig_Response)(nil), // 58: tfplugin5.ValidateProvisionerConfig.Response + (*ProvisionResource_Request)(nil), // 59: tfplugin5.ProvisionResource.Request + (*ProvisionResource_Response)(nil), // 60: tfplugin5.ProvisionResource.Response +} +var file_tfplugin5_proto_depIdxs = []int32{ + 1, // 0: tfplugin5.Diagnostic.severity:type_name -> tfplugin5.Diagnostic.Severity + 5, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath + 23, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step + 26, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry + 27, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block + 28, // 5: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute + 29, // 6: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock + 0, // 7: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind + 0, // 8: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind + 27, // 9: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block + 2, // 10: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode + 8, // 11: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema + 32, // 12: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 33, // 13: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 4, // 14: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 15: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema + 8, // 16: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 8, // 17: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 3, // 18: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 19: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 4, // 20: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 7, // 21: 
tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 3, // 22: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 4, // 23: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 24: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 25: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 26: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 27: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 28: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 29: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 30: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 3, // 31: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 32: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 33: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 34: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 35: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 3, // 36: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 37: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 38: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue + 5, // 39: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 4, // 40: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 41: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 42: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 3, // 43: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 44: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 45: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 46: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 47: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 51, // 48: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 4, // 49: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 50: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 51: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 52: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 4, // 53: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 54: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 4, // 55: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 56: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 57: 
tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 58: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 59: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 4, // 60: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 30, // 61: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request + 34, // 62: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 38, // 63: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request + 40, // 64: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 36, // 65: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 42, // 66: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 44, // 67: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 46, // 68: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 48, // 69: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 50, // 70: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 53, // 71: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 24, // 72: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 55, // 73: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request + 57, // 74: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 59, // 75: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 24, // 76: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 31, // 77: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 35, // 78: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 39, // 79: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 41, // 80: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 37, // 81: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 43, // 82: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 45, // 83: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 47, // 84: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 49, // 85: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 52, // 86: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 54, // 87: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 25, // 88: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 56, // 89: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 58, // 90: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 60, // 91: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 25, // 92: tfplugin5.Provisioner.Stop:output_type -> 
tfplugin5.Stop.Response + 77, // [77:93] is the sub-list for method output_type + 61, // [61:77] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name +} + +func init() { file_tfplugin5_proto_init() } +func file_tfplugin5_proto_init() { + if File_tfplugin5_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tfplugin5_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Diagnostic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RawState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_NestedBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_ImportedResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_tfplugin5_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tfplugin5_proto_rawDesc, + NumEnums: 3, + NumMessages: 58, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_tfplugin5_proto_goTypes, + DependencyIndexes: file_tfplugin5_proto_depIdxs, + EnumInfos: file_tfplugin5_proto_enumTypes, + MessageInfos: file_tfplugin5_proto_msgTypes, + }.Build() + File_tfplugin5_proto = out.File + file_tfplugin5_proto_rawDesc = nil + file_tfplugin5_proto_goTypes = nil + file_tfplugin5_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto new file mode 100644 index 00000000..341bd259 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto @@ -0,0 +1,369 @@ +// Terraform Plugin RPC protocol version 5.2 +// +// This file defines version 5.2 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaing backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the master +// branch or any other development branch. +// +syntax = "proto3"; + +package tfplugin5; +option go_package = "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"; + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
+message DynamicValue { + bytes msgpack = 1; + bytes json = 2; +} + +message Diagnostic { + enum Severity { + INVALID = 0; + ERROR = 1; + WARNING = 2; + } + Severity severity = 1; + string summary = 2; + string detail = 3; + AttributePath attribute = 4; +} + +message AttributePath { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + string element_key_string = 2; + int64 element_key_int = 3; + } + } + repeated Step steps = 1; +} + +message Stop { + message Request { + } + message Response { + string Error = 1; + } +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +message RawState { + bytes json = 1; + map<string, string> flatmap = 2; +} + +enum StringKind { + PLAIN = 0; + MARKDOWN = 1; +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +message Schema { + message Block { + int64 version = 1; + repeated Attribute attributes = 2; + repeated NestedBlock block_types = 3; + string description = 4; + StringKind description_kind = 5; + bool deprecated = 6; + } + + message Attribute { + string name = 1; + bytes type = 2; + string description = 3; + bool required = 4; + bool optional = 5; + bool computed = 6; + bool sensitive = 7; + StringKind description_kind = 8; + bool deprecated = 9; + } + + message NestedBlock { + enum NestingMode { + INVALID = 0; + SINGLE = 1; + LIST = 2; + SET = 3; + MAP = 4; + GROUP = 5; + } + + string type_name = 1; + Block block = 2; + NestingMode nesting = 3; + int64 min_items = 4; + int64 max_items = 5; + } + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + int64 version = 1; + + // Block is the top level configuration block for this schema.
+ Block block = 2; +} + +service Provider { + //////// Information about what a provider supports/expects + rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); + rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response); + rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response); + rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response); + rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); + + //////// One-time initialization, called before other functions below + rpc Configure(Configure.Request) returns (Configure.Response); + + //////// Managed Resource Lifecycle + rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); + rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); + rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response); + rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response); + + rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); + + //////// Graceful Shutdown + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProviderSchema { + message Request { + } + message Response { + Schema provider = 1; + map<string, Schema> resource_schemas = 2; + map<string, Schema> data_source_schemas = 3; + repeated Diagnostic diagnostics = 4; + Schema provider_meta = 5; + } +} + +message PrepareProviderConfig { + message Request { + DynamicValue config = 1; + } + message Response { + DynamicValue prepared_config = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message UpgradeResourceState { + message Request { + string type_name = 1; + + // version is the schema_version number recorded in the state file + int64 version = 2; + + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState raw_state = 3; + } + message Response { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. + DynamicValue upgraded_state = 1; + + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process.
+ repeated Diagnostic diagnostics = 2; + } +} + +message ValidateResourceTypeConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ValidateDataSourceConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message Configure { + message Request { + string terraform_version = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ReadResource { + message Request { + string type_name = 1; + DynamicValue current_state = 2; + bytes private = 3; + DynamicValue provider_meta = 4; + } + message Response { + DynamicValue new_state = 1; + repeated Diagnostic diagnostics = 2; + bytes private = 3; + } +} + +message PlanResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue proposed_new_state = 3; + DynamicValue config = 4; + bytes prior_private = 5; + DynamicValue provider_meta = 6; + } + + message Response { + DynamicValue planned_state = 1; + repeated AttributePath requires_replace = 2; + bytes planned_private = 3; + repeated Diagnostic diagnostics = 4; + + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; + } +} + +message ApplyResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue planned_state = 3; + DynamicValue config = 4; + bytes planned_private = 5; + DynamicValue provider_meta = 6; + } + message Response { + DynamicValue new_state = 1; + bytes private = 2; + repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; + } +} + +message ImportResourceState { + message Request { + string type_name = 1; + string id = 2; + } + + message ImportedResource { + string type_name = 1; + DynamicValue state = 2; + bytes private = 3; + } + + message Response { + repeated ImportedResource imported_resources = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ReadDataSource { + message Request { + string type_name = 1; + DynamicValue config = 2; + DynamicValue provider_meta = 3; + } + message Response { + DynamicValue state = 1; + repeated Diagnostic diagnostics = 2; + } +} + +service Provisioner { + rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); + rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); + rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProvisionerSchema { + message Request { + } + message Response { + Schema provisioner = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateProvisionerConfig { + message Request { + DynamicValue config = 1; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ProvisionResource { + message Request { + DynamicValue config = 1; + DynamicValue connection = 2; + } + message Response { + string output = 1; + repeated Diagnostic diagnostics = 2; + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go new file mode 100644 index 00000000..2a1c19ad --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go @@ -0,0 +1,720 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package tfplugin5 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ProviderClient interface { + //////// Information about what a provider supports/expects + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc grpc.ClientConnInterface +} + +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. 
+// All implementations must embed UnimplementedProviderServer +// for forward compatibility +type ProviderServer interface { + //////// Information about what a provider supports/expects + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) + mustEmbedUnimplementedProviderServer() +} + +// UnimplementedProviderServer must be embedded to have forward compatible implementations. +type UnimplementedProviderServer struct { +} + +func (UnimplementedProviderServer) GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (UnimplementedProviderServer) PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") +} +func (UnimplementedProviderServer) ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") +} +func (UnimplementedProviderServer) ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") +} +func (UnimplementedProviderServer) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (UnimplementedProviderServer) Configure(context.Context, *Configure_Request) (*Configure_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (UnimplementedProviderServer) ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func (UnimplementedProviderServer) PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method PlanResourceChange not implemented") +} +func (UnimplementedProviderServer) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (UnimplementedProviderServer) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (UnimplementedProviderServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} +func (UnimplementedProviderServer) mustEmbedUnimplementedProviderServer() {} + +// UnsafeProviderServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProviderServer will +// result in compilation errors. +type UnsafeProviderServer interface { + mustEmbedUnimplementedProviderServer() +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, 
handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: 
"ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc grpc.ClientConnInterface +} + +func NewProvisionerClient(cc grpc.ClientConnInterface) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) + if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. 
+// All implementations must embed UnimplementedProvisionerServer +// for forward compatibility +type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) + mustEmbedUnimplementedProvisionerServer() +} + +// UnimplementedProvisionerServer must be embedded to have forward compatible implementations. +type UnimplementedProvisionerServer struct { +} + +func (UnimplementedProvisionerServer) GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (UnimplementedProvisionerServer) ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") +} +func (UnimplementedProvisionerServer) ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error { + return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") +} +func (UnimplementedProvisionerServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} +func (UnimplementedProvisionerServer) mustEmbedUnimplementedProvisionerServer() {} + +// UnsafeProvisionerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ProvisionerServer will +// result in compilation errors. 
+type UnsafeProvisionerServer interface { + mustEmbedUnimplementedProvisionerServer() +} + +func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { + s.RegisterService(&_Provisioner_serviceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/attribute_path.go new file mode 100644 index 00000000..7e63d59d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/attribute_path.go @@ -0,0 +1,95 @@ +package toproto + +import ( + "errors" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +var ErrUnknownAttributePathStepType = errors.New("unknown type of AttributePath_Step") + +func AttributePath(in *tftypes.AttributePath) (*tfplugin5.AttributePath, error) { + steps, err := AttributePath_Steps(in.Steps) + if err != nil { + return nil, err + } + return &tfplugin5.AttributePath{ + Steps: steps, + }, nil +} + +func AttributePaths(in []*tftypes.AttributePath) ([]*tfplugin5.AttributePath, error) { + resp := make([]*tfplugin5.AttributePath, 0, len(in)) + for _, a := range in { + if a == nil { + resp = append(resp, nil) + continue + } + attr, err := AttributePath(a) + if err != nil { + return resp, err + } + resp = append(resp, attr) + } + return resp, nil +} + +func AttributePath_Step(step tftypes.AttributePathStep) (*tfplugin5.AttributePath_Step, error) { + var resp tfplugin5.AttributePath_Step + if name, ok := step.(tftypes.AttributeName); ok { + resp.Selector = &tfplugin5.AttributePath_Step_AttributeName{ + AttributeName: string(name), + } + return &resp, nil + } + if key, ok := step.(tftypes.ElementKeyString); ok { + resp.Selector = &tfplugin5.AttributePath_Step_ElementKeyString{ + ElementKeyString: string(key), + } + return &resp, nil + } + if key, ok := step.(tftypes.ElementKeyInt); ok { + resp.Selector = &tfplugin5.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: int64(key), + } + return &resp, nil + } + if _, ok := step.(tftypes.ElementKeyValue); ok { + // the protocol has no equivalent of an ElementKeyValue, so we + // return nil for both the step and the error here, to signal + // that we've hit a step we can't convey back to Terraform + return nil, nil + } + return nil, ErrUnknownAttributePathStepType +} + +func AttributePath_Steps(in []tftypes.AttributePathStep) ([]*tfplugin5.AttributePath_Step, error) { + resp := make([]*tfplugin5.AttributePath_Step, 0, len(in)) + for _, step := range in { + if step == nil { + resp = append(resp, nil) + continue + } + s, err := AttributePath_Step(step) + if err != nil { + return resp, err + } + // in the face of a set, the protocol has no way to represent + // the index, so we just bail and return the prefix we can + // return. + if s == nil { + return resp, nil + } + resp = append(resp, s) + } + return resp, nil +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. 
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go new file mode 100644 index 00000000..516812cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/data_source.go @@ -0,0 +1,61 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ValidateDataSourceConfig_Request(in *tfprotov5.ValidateDataSourceConfigRequest) (*tfplugin5.ValidateDataSourceConfig_Request, error) { + resp := &tfplugin5.ValidateDataSourceConfig_Request{ + TypeName: in.TypeName, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func ValidateDataSourceConfig_Response(in *tfprotov5.ValidateDataSourceConfigResponse) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfplugin5.ValidateDataSourceConfig_Response{ + Diagnostics: diags, + }, nil +} + +func ReadDataSource_Request(in *tfprotov5.ReadDataSourceRequest) (*tfplugin5.ReadDataSource_Request, error) { + resp := &tfplugin5.ReadDataSource_Request{ + TypeName: in.TypeName, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func ReadDataSource_Response(in *tfprotov5.ReadDataSourceResponse) (*tfplugin5.ReadDataSource_Response, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + resp := &tfplugin5.ReadDataSource_Response{ + Diagnostics: diags, + } + if in.State != nil { + resp.State = DynamicValue(in.State) + } + return resp, nil +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. 
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go new file mode 100644 index 00000000..979896f7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go @@ -0,0 +1,50 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func Diagnostic(in *tfprotov5.Diagnostic) (*tfplugin5.Diagnostic, error) { + diag := &tfplugin5.Diagnostic{ + Severity: Diagnostic_Severity(in.Severity), + Summary: in.Summary, + Detail: in.Detail, + } + if in.Attribute != nil { + attr, err := AttributePath(in.Attribute) + if err != nil { + return diag, err + } + diag.Attribute = attr + } + return diag, nil +} + +func Diagnostic_Severity(in tfprotov5.DiagnosticSeverity) tfplugin5.Diagnostic_Severity { + return tfplugin5.Diagnostic_Severity(in) +} + +func Diagnostics(in []*tfprotov5.Diagnostic) ([]*tfplugin5.Diagnostic, error) { + diagnostics := make([]*tfplugin5.Diagnostic, 0, len(in)) + for _, diag := range in { + if diag == nil { + diagnostics = append(diagnostics, nil) + continue + } + d, err := Diagnostic(diag) + if err != nil { + return diagnostics, err + } + diagnostics = append(diagnostics, d) + } + return diagnostics, nil +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/dynamic_value.go new file mode 100644 index 00000000..0c0af8a8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/dynamic_value.go @@ -0,0 +1,35 @@ +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +func DynamicValue(in *tfprotov5.DynamicValue) *tfplugin5.DynamicValue { + return &tfplugin5.DynamicValue{ + Msgpack: in.MsgPack, + Json: in.JSON, + } +} + +func CtyType(in tftypes.Type) ([]byte, error) { + switch { + case in.Is(tftypes.String), in.Is(tftypes.Bool), in.Is(tftypes.Number), + in.Is(tftypes.List{}), in.Is(tftypes.Map{}), + in.Is(tftypes.Set{}), in.Is(tftypes.Object{}), + in.Is(tftypes.Tuple{}), in.Is(tftypes.DynamicPseudoType): + return in.MarshalJSON() //nolint:staticcheck + } + return nil, fmt.Errorf("unknown type %s", in) +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. 
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go new file mode 100644 index 00000000..a18430f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go @@ -0,0 +1,120 @@ +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func GetProviderSchema_Request(in *tfprotov5.GetProviderSchemaRequest) (*tfplugin5.GetProviderSchema_Request, error) { + return &tfplugin5.GetProviderSchema_Request{}, nil +} + +func GetProviderSchema_Response(in *tfprotov5.GetProviderSchemaResponse) (*tfplugin5.GetProviderSchema_Response, error) { + var resp tfplugin5.GetProviderSchema_Response + if in.Provider != nil { + schema, err := Schema(in.Provider) + if err != nil { + return &resp, fmt.Errorf("error marshaling provider schema: %w", err) + } + resp.Provider = schema + } + if in.ProviderMeta != nil { + schema, err := Schema(in.ProviderMeta) + if err != nil { + return &resp, fmt.Errorf("error marshaling provider_meta schema: %w", err) + } + resp.ProviderMeta = schema + } + resp.ResourceSchemas = make(map[string]*tfplugin5.Schema, len(in.ResourceSchemas)) + for k, v := range in.ResourceSchemas { + if v == nil { + resp.ResourceSchemas[k] = nil + continue + } + schema, err := Schema(v) + if err != nil { + return &resp, fmt.Errorf("error marshaling resource schema for %q: %w", k, err) + } + resp.ResourceSchemas[k] = schema + } + resp.DataSourceSchemas = make(map[string]*tfplugin5.Schema, len(in.DataSourceSchemas)) + for k, v := range in.DataSourceSchemas { + if v == nil { + resp.DataSourceSchemas[k] = nil + continue + } + schema, err := Schema(v) + if err != nil { + return &resp, fmt.Errorf("error marshaling data source schema for %q: %w", k, err) + } + resp.DataSourceSchemas[k] = schema + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return &resp, err + } + resp.Diagnostics = diags + return &resp, nil +} + +func PrepareProviderConfig_Request(in *tfprotov5.PrepareProviderConfigRequest) (*tfplugin5.PrepareProviderConfig_Request, error) { + resp := &tfplugin5.PrepareProviderConfig_Request{} + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func PrepareProviderConfig_Response(in *tfprotov5.PrepareProviderConfigResponse) (*tfplugin5.PrepareProviderConfig_Response, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + resp := &tfplugin5.PrepareProviderConfig_Response{ + Diagnostics: diags, + } + if in.PreparedConfig != nil { + resp.PreparedConfig = DynamicValue(in.PreparedConfig) + } + return resp, nil +} + +func Configure_Request(in *tfprotov5.ConfigureProviderRequest) (*tfplugin5.Configure_Request, error) { + resp := &tfplugin5.Configure_Request{ + TerraformVersion: in.TerraformVersion, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func Configure_Response(in *tfprotov5.ConfigureProviderResponse) (*tfplugin5.Configure_Response, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfplugin5.Configure_Response{ + Diagnostics: diags, + }, nil +} + +func Stop_Request(in *tfprotov5.StopProviderRequest) (*tfplugin5.Stop_Request, error) { + return &tfplugin5.Stop_Request{}, nil +} + +func Stop_Response(in 
*tfprotov5.StopProviderResponse) (*tfplugin5.Stop_Response, error) { + return &tfplugin5.Stop_Response{ + Error: in.Error, + }, nil +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go new file mode 100644 index 00000000..77eba487 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/resource.go @@ -0,0 +1,214 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func ValidateResourceTypeConfig_Request(in *tfprotov5.ValidateResourceTypeConfigRequest) (*tfplugin5.ValidateResourceTypeConfig_Request, error) { + resp := &tfplugin5.ValidateResourceTypeConfig_Request{ + TypeName: in.TypeName, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + return resp, nil +} + +func ValidateResourceTypeConfig_Response(in *tfprotov5.ValidateResourceTypeConfigResponse) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfplugin5.ValidateResourceTypeConfig_Response{ + Diagnostics: diags, + }, nil +} + +func UpgradeResourceState_Request(in *tfprotov5.UpgradeResourceStateRequest) (*tfplugin5.UpgradeResourceState_Request, error) { + resp := &tfplugin5.UpgradeResourceState_Request{ + TypeName: in.TypeName, + Version: in.Version, + } + if in.RawState != nil { + resp.RawState = RawState(in.RawState) + } + return resp, nil +} + +func UpgradeResourceState_Response(in *tfprotov5.UpgradeResourceStateResponse) (*tfplugin5.UpgradeResourceState_Response, error) { + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + resp := &tfplugin5.UpgradeResourceState_Response{ + Diagnostics: diags, + } + if in.UpgradedState != nil { + resp.UpgradedState = DynamicValue(in.UpgradedState) + } + return resp, nil +} + +func ReadResource_Request(in *tfprotov5.ReadResourceRequest) (*tfplugin5.ReadResource_Request, error) { + resp := &tfplugin5.ReadResource_Request{ + TypeName: in.TypeName, + Private: in.Private, + } + if in.CurrentState != nil { + resp.CurrentState = DynamicValue(in.CurrentState) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func ReadResource_Response(in *tfprotov5.ReadResourceResponse) (*tfplugin5.ReadResource_Response, error) { + resp := &tfplugin5.ReadResource_Response{ + Private: in.Private, + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return resp, err + } + resp.Diagnostics = diags + if in.NewState != nil { + resp.NewState = DynamicValue(in.NewState) + } + return resp, nil +} + +func PlanResourceChange_Request(in *tfprotov5.PlanResourceChangeRequest) (*tfplugin5.PlanResourceChange_Request, error) { + resp := &tfplugin5.PlanResourceChange_Request{ + TypeName: in.TypeName, + PriorPrivate: in.PriorPrivate, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + if 
in.PriorState != nil { + resp.PriorState = DynamicValue(in.PriorState) + } + if in.ProposedNewState != nil { + resp.ProposedNewState = DynamicValue(in.ProposedNewState) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func PlanResourceChange_Response(in *tfprotov5.PlanResourceChangeResponse) (*tfplugin5.PlanResourceChange_Response, error) { + resp := &tfplugin5.PlanResourceChange_Response{ + PlannedPrivate: in.PlannedPrivate, + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck + } + requiresReplace, err := AttributePaths(in.RequiresReplace) + if err != nil { + return resp, err + } + resp.RequiresReplace = requiresReplace + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return resp, err + } + resp.Diagnostics = diags + if in.PlannedState != nil { + resp.PlannedState = DynamicValue(in.PlannedState) + } + return resp, nil +} + +func ApplyResourceChange_Request(in *tfprotov5.ApplyResourceChangeRequest) (*tfplugin5.ApplyResourceChange_Request, error) { + resp := &tfplugin5.ApplyResourceChange_Request{ + TypeName: in.TypeName, + PlannedPrivate: in.PlannedPrivate, + } + if in.Config != nil { + resp.Config = DynamicValue(in.Config) + } + if in.PriorState != nil { + resp.PriorState = DynamicValue(in.PriorState) + } + if in.PlannedState != nil { + resp.PlannedState = DynamicValue(in.PlannedState) + } + if in.ProviderMeta != nil { + resp.ProviderMeta = DynamicValue(in.ProviderMeta) + } + return resp, nil +} + +func ApplyResourceChange_Response(in *tfprotov5.ApplyResourceChangeResponse) (*tfplugin5.ApplyResourceChange_Response, error) { + resp := &tfplugin5.ApplyResourceChange_Response{ + Private: in.Private, + LegacyTypeSystem: in.UnsafeToUseLegacyTypeSystem, //nolint:staticcheck + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return resp, err + } + resp.Diagnostics = diags + if in.NewState != nil { + resp.NewState = DynamicValue(in.NewState) + } + return resp, nil +} + +func ImportResourceState_Request(in *tfprotov5.ImportResourceStateRequest) (*tfplugin5.ImportResourceState_Request, error) { + return &tfplugin5.ImportResourceState_Request{ + TypeName: in.TypeName, + Id: in.ID, + }, nil +} + +func ImportResourceState_Response(in *tfprotov5.ImportResourceStateResponse) (*tfplugin5.ImportResourceState_Response, error) { + importedResources, err := ImportResourceState_ImportedResources(in.ImportedResources) + if err != nil { + return nil, err + } + diags, err := Diagnostics(in.Diagnostics) + if err != nil { + return nil, err + } + return &tfplugin5.ImportResourceState_Response{ + ImportedResources: importedResources, + Diagnostics: diags, + }, nil +} + +func ImportResourceState_ImportedResource(in *tfprotov5.ImportedResource) (*tfplugin5.ImportResourceState_ImportedResource, error) { + resp := &tfplugin5.ImportResourceState_ImportedResource{ + TypeName: in.TypeName, + Private: in.Private, + } + if in.State != nil { + resp.State = DynamicValue(in.State) + } + return resp, nil +} + +func ImportResourceState_ImportedResources(in []*tfprotov5.ImportedResource) ([]*tfplugin5.ImportResourceState_ImportedResource, error) { + resp := make([]*tfplugin5.ImportResourceState_ImportedResource, 0, len(in)) + for _, i := range in { + if i == nil { + resp = append(resp, nil) + continue + } + r, err := ImportResourceState_ImportedResource(i) + if err != nil { + return resp, err + } + resp = append(resp, r) + } + return resp, nil +} + +// we have to say this next thing to get golint to stop yelling 
at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/schema.go new file mode 100644 index 00000000..c39ee2a4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/schema.go @@ -0,0 +1,121 @@ +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func Schema(in *tfprotov5.Schema) (*tfplugin5.Schema, error) { + var resp tfplugin5.Schema + resp.Version = in.Version + if in.Block != nil { + block, err := Schema_Block(in.Block) + if err != nil { + return &resp, fmt.Errorf("error marshalling block: %w", err) + } + resp.Block = block + } + return &resp, nil +} + +func Schema_Block(in *tfprotov5.SchemaBlock) (*tfplugin5.Schema_Block, error) { + resp := &tfplugin5.Schema_Block{ + Version: in.Version, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Deprecated: in.Deprecated, + } + attrs, err := Schema_Attributes(in.Attributes) + if err != nil { + return resp, err + } + resp.Attributes = attrs + blocks, err := Schema_NestedBlocks(in.BlockTypes) + if err != nil { + return resp, err + } + resp.BlockTypes = blocks + return resp, nil +} + +func Schema_Attribute(in *tfprotov5.SchemaAttribute) (*tfplugin5.Schema_Attribute, error) { + resp := &tfplugin5.Schema_Attribute{ + Name: in.Name, + Description: in.Description, + Required: in.Required, + Optional: in.Optional, + Computed: in.Computed, + Sensitive: in.Sensitive, + DescriptionKind: StringKind(in.DescriptionKind), + Deprecated: in.Deprecated, + } + t, err := CtyType(in.Type) + if err != nil { + return resp, fmt.Errorf("error marshaling type to JSON: %w", err) + } + resp.Type = t + return resp, nil +} + +func Schema_Attributes(in []*tfprotov5.SchemaAttribute) ([]*tfplugin5.Schema_Attribute, error) { + resp := make([]*tfplugin5.Schema_Attribute, 0, len(in)) + for _, a := range in { + if a == nil { + resp = append(resp, nil) + continue + } + attr, err := Schema_Attribute(a) + if err != nil { + return nil, err + } + resp = append(resp, attr) + } + return resp, nil +} + +func Schema_NestedBlock(in *tfprotov5.SchemaNestedBlock) (*tfplugin5.Schema_NestedBlock, error) { + resp := &tfplugin5.Schema_NestedBlock{ + TypeName: in.TypeName, + Nesting: Schema_NestedBlock_NestingMode(in.Nesting), + MinItems: in.MinItems, + MaxItems: in.MaxItems, + } + if in.Block != nil { + block, err := Schema_Block(in.Block) + if err != nil { + return resp, fmt.Errorf("error marshaling nested block: %w", err) + } + resp.Block = block + } + return resp, nil +} + +func Schema_NestedBlocks(in []*tfprotov5.SchemaNestedBlock) ([]*tfplugin5.Schema_NestedBlock, error) { + resp := make([]*tfplugin5.Schema_NestedBlock, 0, len(in)) + for _, b := range in { + if b == nil { + resp = append(resp, nil) + continue + } + block, err := Schema_NestedBlock(b) + if err != nil { + return nil, err + } + resp = append(resp, block) + } + return resp, nil +} + +func Schema_NestedBlock_NestingMode(in tfprotov5.SchemaNestedBlockNestingMode) 
tfplugin5.Schema_NestedBlock_NestingMode { + return tfplugin5.Schema_NestedBlock_NestingMode(in) +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/state.go new file mode 100644 index 00000000..3a13d1f4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/state.go @@ -0,0 +1,21 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func RawState(in *tfprotov5.RawState) *tfplugin5.RawState { + return &tfplugin5.RawState{ + Json: in.JSON, + Flatmap: in.Flatmap, + } +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/string_kind.go new file mode 100644 index 00000000..1ea6628c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/string_kind.go @@ -0,0 +1,18 @@ +package toproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func StringKind(in tfprotov5.StringKind) tfplugin5.StringKind { + return tfplugin5.StringKind(in) +} + +// we have to say this next thing to get golint to stop yelling at us about the +// underscores in the function names. We want the function names to match +// actually-generated code, so it feels like fair play. It's just a shame we +// lose golint for the entire file. +// +// This file is not actually generated. You can edit it. Ignore this next line. +// Code generated by hand ignore this next bit DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go new file mode 100644 index 00000000..c4b91cdc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go @@ -0,0 +1,181 @@ +package tfprotov5 + +import ( + "context" +) + +// ProviderServer is an interface that reflects that Terraform protocol. +// Providers must implement this interface. +type ProviderServer interface { + // GetProviderSchema is called when Terraform needs to know what the + // provider's schema is, along with the schemas of all its resources + // and data sources. + GetProviderSchema(context.Context, *GetProviderSchemaRequest) (*GetProviderSchemaResponse, error) + + // PrepareProviderConfig is called to give a provider a chance to + // modify the configuration the user specified before validation. 
+ PrepareProviderConfig(context.Context, *PrepareProviderConfigRequest) (*PrepareProviderConfigResponse, error) + + // ConfigureProvider is called to pass the user-specified provider + // configuration to the provider. + ConfigureProvider(context.Context, *ConfigureProviderRequest) (*ConfigureProviderResponse, error) + + // StopProvider is called when Terraform would like providers to shut + // down as quickly as possible, and usually represents an interrupt. + StopProvider(context.Context, *StopProviderRequest) (*StopProviderResponse, error) + + // ResourceServer is an interface encapsulating all the + // resource-related RPC requests. ProviderServer implementations must + // implement them, but they are a handy interface for defining what a + // resource is to terraform-plugin-go, so they're their own interface + // that is composed into ProviderServer. + ResourceServer + + // DataSourceServer is an interface encapsulating all the data + // source-related RPC requests. ProviderServer implementations must + // implement them, but they are a handy interface for defining what a + // data source is to terraform-plugin-go, so they're their own + // interface that is composed into ProviderServer. + DataSourceServer +} + +// GetProviderSchemaRequest represents a Terraform RPC request for the +// provider's schemas. +type GetProviderSchemaRequest struct{} + +// GetProviderSchemaResponse represents a Terraform RPC response containing the +// provider's schemas. +type GetProviderSchemaResponse struct { + // Provider defines the schema for the provider configuration, which + // will be specified in the provider block of the user's configuration. + Provider *Schema + + // ProviderMeta defines the schema for the provider's metadata, which + // will be specified in the provider_meta blocks of the terraform block + // for a module. This is an advanced feature and its usage should be + // coordinated with the Terraform Core team by opening an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. + ProviderMeta *Schema + + // ResourceSchemas is a map of resource names to the schema for the + // configuration specified in the resource. The name should be a + // resource name, and should be prefixed with your provider's shortname + // and an underscore. It should match the first label after `resource` + // in a user's configuration. + ResourceSchemas map[string]*Schema + + // DataSourceSchemas is a map of data source names to the schema for + // the configuration specified in the data source. The name should be a + // data source name, and should be prefixed with your provider's + // shortname and an underscore. It should match the first label after + // `data` in a user's configuration. + DataSourceSchemas map[string]*Schema + + // Diagnostics report errors or warnings related to returning the + // provider's schemas. Returning an empty slice indicates success, with + // no errors or warnings generated. + Diagnostics []*Diagnostic +} + +// PrepareProviderConfigRequest represents a Terraform RPC request for the +// provider to modify the provider configuration in preparation for Terraform +// validating it. +type PrepareProviderConfigRequest struct { + // Config is the configuration the user supplied for the provider. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value.
+ // + // The PrepareProviderConfig RPC call will be called twice; once when + // generating a plan, once when applying the plan. When called during + // plan, Config can contain unknown values if fields with unknown + // values are interpolated into it. At apply time, all fields will have + // known values. Values that are not set in the configuration will be + // null. + Config *DynamicValue +} + +// PrepareProviderConfigResponse represents a Terraform RPC response containing +// a modified provider configuration that Terraform can now validate and use. +type PrepareProviderConfigResponse struct { + // PreparedConfig should be set to the modified configuration. See the + // documentation on `DynamicValue` for information about safely + // creating the `DynamicValue`. + // + // This RPC call exists because early versions of the Terraform Plugin + // SDK allowed providers to set defaults for provider configurations in + // such a way that Terraform couldn't validate the provider config + // without retrieving the default values first. As providers using + // terraform-plugin-go directly and new frameworks built on top of it + // have no such requirement, it is safe and recommended to simply set + // PreparedConfig to the value of the PrepareProviderConfigRequest's + // Config property, indicating that no changes are needed to the + // configuration. + // + // The configuration should be represented as a tftypes.Object, with + // each attribute and nested block getting its own key and value. + // + // TODO: should we provide an implementation that does that that + // provider developers can just embed and not need to implement the + // method themselves, then? + PreparedConfig *DynamicValue + + // Diagnostics report errors or warnings related to preparing the + // provider's configuration. Returning an empty slice indicates + // success, with no errors or warnings generated. + Diagnostics []*Diagnostic +} + +// ConfigureProviderRequest represents a Terraform RPC request to supply the +// provider with information about what the user entered in the provider's +// configuration block. +type ConfigureProviderRequest struct { + // TerraformVersion is the version of Terraform executing the request. + // This is supplied for logging, analytics, and User-Agent purposes + // *only*. Providers should not try to gate provider behavior on + // Terraform versions. It will make you sad. We can't stop you from + // doing it, but we really highly recommend you do not do it. + TerraformVersion string + + // Config is the configuration the user supplied for the provider. This + // information should usually be persisted to the underlying type + // that's implementing the ProviderServer interface, for use in later + // RPC requests. See the documentation on `DynamicValue` for more + // information about safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // The ConfigureProvider RPC call will be called twice; once when + // generating a plan, once when applying the plan. When called during + // plan, Config can contain unknown values if fields with unknown + // values are interpolated into it. At apply time, all fields will have + // known values. Values that are not set in the configuration will be + // null. + Config *DynamicValue +} + +// ConfigureProviderResponse represents a Terraform RPC response to the +// configuration block that Terraform supplied for the provider. 
+type ConfigureProviderResponse struct { + // Diagnostics report errors or warnings related to the provider's + // configuration. Returning an empty slice indicates success, with no + // errors or warnings generated. + Diagnostics []*Diagnostic +} + +// StopProviderRequest represents a Terraform RPC request to interrupt a +// provider's work and terminate a provider's processes as soon as possible. +type StopProviderRequest struct{} + +// StopProviderResponse represents a Terraform RPC response surfacing any issues +// the provider encountered in terminating. +type StopProviderResponse struct { + // Error should be set to a string describing the error if the provider + // cannot currently shut down for some reason. Because this always + // represents a system error and not a user error, it is returned as a + // string, not a Diagnostic. + Error string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go new file mode 100644 index 00000000..ea16343f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/resource.go @@ -0,0 +1,451 @@ +package tfprotov5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +// ResourceServer is an interface containing the methods a resource +// implementation needs to fill. +type ResourceServer interface { + // ValidateResourceTypeConfig is called when Terraform is checking that + // a resource's configuration is valid. It is guaranteed to have types + // conforming to your schema. This is your opportunity to do custom or + // advanced validation prior to a plan being generated. + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfigRequest) (*ValidateResourceTypeConfigResponse, error) + + // UpgradeResourceState is called when Terraform has encountered a + // resource with a state in a schema that doesn't match the schema's + // current version. It is the provider's responsibility to modify the + // state to upgrade it to the latest state schema. + UpgradeResourceState(context.Context, *UpgradeResourceStateRequest) (*UpgradeResourceStateResponse, error) + + // ReadResource is called when Terraform is refreshing a resource's + // state. + ReadResource(context.Context, *ReadResourceRequest) (*ReadResourceResponse, error) + + // PlanResourceChange is called when Terraform is attempting to + // calculate a plan for a resource. Terraform will suggest a proposed + // new state, which the provider can modify or return unmodified to + // influence Terraform's plan. + PlanResourceChange(context.Context, *PlanResourceChangeRequest) (*PlanResourceChangeResponse, error) + + // ApplyResourceChange is called when Terraform has detected a diff + // between the resource's state and the user's config, and the user has + // approved a planned change. The provider is to apply the changes + // contained in the plan, and return the resulting state. + ApplyResourceChange(context.Context, *ApplyResourceChangeRequest) (*ApplyResourceChangeResponse, error) + + // ImportResourceState is called when a user has requested Terraform + // import a resource. The provider should fetch the information + // specified by the passed ID and return it as one or more resource + // states for Terraform to assume control of.
+ ImportResourceState(context.Context, *ImportResourceStateRequest) (*ImportResourceStateResponse, error) +} + +// ValidateResourceTypeConfigRequest is the request Terraform sends when it +// wants to validate a resource's configuration. +type ValidateResourceTypeConfigRequest struct { + // TypeName is the type of resource Terraform is validating. + TypeName string + + // Config is the configuration the user supplied for that resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. Any attributes not directly + // set in the configuration will be null. + Config *DynamicValue +} + +// ValidateResourceTypeConfigResponse is the response from the provider about +// the validity of a resource's configuration. +type ValidateResourceTypeConfigResponse struct { + // Diagnostics report errors or warnings related to the given + // configuration. Returning an empty slice indicates a successful + // validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// UpgradeResourceStateRequest is the request Terraform sends when it needs a +// provider to upgrade the state of a given resource. +type UpgradeResourceStateRequest struct { + // TypeName is the type of resource that Terraform needs to upgrade the + // state for. + TypeName string + + // Version is the version of the state the resource currently has. + Version int64 + + // RawState is the state as Terraform sees it right now. See the + // documentation for `RawState` for information on how to work with the + // data it contains. + RawState *RawState +} + +// UpgradeResourceStateResponse is the response from the provider containing +// the upgraded state for the given resource. +type UpgradeResourceStateResponse struct { + // UpgradedState is the upgraded state for the resource, represented as + // a `DynamicValue`. See the documentation on `DynamicValue` for + // information about safely creating the `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + UpgradedState *DynamicValue + + // Diagnostics report errors or warnings related to upgrading the + // state of the requested resource. Returning an empty slice indicates + // a successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ReadResourceRequest is the request Terraform sends when it wants to get the +// latest state for a resource. +type ReadResourceRequest struct { + // TypeName is the type of resource Terraform is requesting an upated + // state for. + TypeName string + + // CurrentState is the current state of the resource as far as + // Terraform knows, represented as a `DynamicValue`. See the + // documentation for `DynamicValue` for information about safely + // accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + CurrentState *DynamicValue + + // Private is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. 
+ Private []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ReadResourceResponse is the response from the provider about the current +// state of the requested resource. +type ReadResourceResponse struct { + // NewState is the current state of the resource according to the + // provider, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely creating the + // `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + NewState *DynamicValue + + // Diagnostics report errors or warnings related to retrieving the + // current state of the requested resource. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte +} + +// PlanResourceChangeRequest is the request Terraform sends when it is +// generating a plan for a resource and wants the provider's input on what the +// planned state should be. +type PlanResourceChangeRequest struct { + // TypeName is the type of resource Terraform is generating a plan for. + TypeName string + + // PriorState is the state of the resource before the plan is applied, + // represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PriorState *DynamicValue + + // ProposedNewState is the state that Terraform is proposing for the + // resource, with the changes in the configuration applied, represented + // as a `DynamicValue`. See the documentation for `DynamicValue` for + // information about safely accessing the state. + // + // The ProposedNewState merges any non-null values in the configuration + // with any computed attributes in PriorState as a utility to help + // providers avoid needing to implement such merging functionality + // themselves. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + // + // The ProposedNewState will be null when planning a delete operation. + ProposedNewState *DynamicValue + + // Config is the configuration the user supplied for the resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. 
+ // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config *DynamicValue + + // PriorPrivate is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. + PriorPrivate []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// PlanResourceChangeResponse is the response from the provider about what the +// planned state for a given resource should be. +type PlanResourceChangeResponse struct { + // PlannedState is the provider's indication of what the state for the + // resource should be after apply, represented as a `DynamicValue`. See + // the documentation for `DynamicValue` for information about safely + // creating the `DynamicValue`. + // + // This is usually derived from the ProposedNewState passed in the + // PlanResourceChangeRequest, with default values substituted for any + // null values and overriding any computed values that are expected to + // change as a result of the apply operation. This may contain unknown + // values if the value could change but its new value won't be known + // until apply time. + // + // Any value that was non-null in the configuration must either + // preserve the exact configuration value or return the corresponding + // value from the prior state. The value from the prior state should be + // returned when the configuration value is semantically equivalent to + // the state value. + // + // Any value that is marked as computed in the schema and is null in + // the configuration may be set by the provider to any value of the + // expected type. + // + // PlanResourceChange will actually be called twice; once when + // generating the plan for the user to approve, once during the apply. + // During the apply, additional values from the configuration--upstream + // values interpolated in that were computed at apply time--will be + // populated. During this second call, any attribute that had a known + // value in the first PlannedState must have an identical value in the + // second PlannedState. Any unknown values may remain unknown or may + // take on any value of the appropriate type. This means the values + // returned in PlannedState should be deterministic and unknown values + // should be used if a field's value may change depending on what value + // ends up filling an unknown value in the config. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + PlannedState *DynamicValue + + // RequiresReplace is a list of tftypes.AttributePaths that require the + // resource to be replaced. 
They should point to the specific field + // that changed that requires the resource to be destroyed and + // recreated. + RequiresReplace []*tftypes.AttributePath + + // PlannedPrivate should be set to any state that the provider would + // like sent with requests for this resource. This state will be + // associated with the resource, but will not be considered when + // calculating diffs. + PlannedPrivate []byte + + // Diagnostics report errors or warnings related to determining the + // planned state of the requested resource. Returning an empty slice + // indicates a successful validation with no warnings or errors + // generated. + Diagnostics []*Diagnostic + + // UnsafeToUseLegacyTypeSystem should only be set by + // hashicorp/terraform-plugin-sdk. It modifies Terraform's behavior to + // work with the legacy expectations of that SDK. + // + // Nobody else should use this. Ever. For any reason. Just don't do it. + // + // We have to expose it here for terraform-plugin-sdk to be muxable, or + // we wouldn't even be including it in this type. Don't use it. It may + // go away or change behavior on you with no warning. It is + // explicitly unsupported and not part of our SemVer guarantees. + // + // Deprecated: Really, just don't use this, you don't need it. + UnsafeToUseLegacyTypeSystem bool +} + +// ApplyResourceChangeRequest is the request Terraform sends when it needs to +// apply a planned set of changes to a resource. +type ApplyResourceChangeRequest struct { + // TypeName is the type of resource Terraform wants to change. + TypeName string + + // PriorState is the state of the resource before the changes are + // applied, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely accessing the state. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PriorState *DynamicValue + + // PlannedState is Terraform's plan for what the state should look like + // after the changes are applied, represented as a `DynamicValue`. See + // the documentation for `DynamicValue` for information about safely + // accessing the state. + // + // This is the PlannedState returned during PlanResourceChange. + // + // The state is represented as a tftypes.Object, with each attribute + // and nested block getting its own key and value. + PlannedState *DynamicValue + + // Config is the configuration the user supplied for the resource. See + // the documentation on `DynamicValue` for more information about + // safely accessing the configuration. + // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration may contain unknown values. + Config *DynamicValue + + // PlannedPrivate is any provider-defined private state stored with the + // resource. It is used for keeping state with the resource that is not + // meant to be included when calculating diffs. + PlannedPrivate []byte + + // ProviderMeta supplies the provider metadata configuration for the + // module this resource is in. Module-specific provider metadata is an + // advanced feature and usage of it should be coordinated with the + // Terraform Core team by raising an issue at + // https://github.com/hashicorp/terraform/issues/new/choose. See the + // documentation on `DynamicValue` for information about safely + // accessing the configuration. 
+ // + // The configuration is represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + // + // This configuration will have known values for all fields. + ProviderMeta *DynamicValue +} + +// ApplyResourceChangeResponse is the response from the provider about what the +// state of a resource is after planned changes have been applied. +type ApplyResourceChangeResponse struct { + // NewState is the provider's understanding of what the resource's + // state is after changes are applied, represented as a `DynamicValue`. + // See the documentation for `DynamicValue` for information about + // safely creating the `DynamicValue`. + // + // Any attribute, whether computed or not, that has a known value in + // the PlannedState in the ApplyResourceChangeRequest must be preserved + // exactly as it was in NewState. + // + // Any attribute in the PlannedState in the ApplyResourceChangeRequest + // that is unknown must take on a known value at this time. No unknown + // values are allowed in the NewState. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + NewState *DynamicValue + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte + + // Diagnostics report errors or warnings related to applying changes to + // the requested resource. Returning an empty slice indicates a + // successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic + + // UnsafeToUseLegacyTypeSystem should only be set by + // hashicorp/terraform-plugin-sdk. It modifies Terraform's behavior to + // work with the legacy expectations of that SDK. + // + // Nobody else should use this. Ever. For any reason. Just don't do it. + // + // We have to expose it here for terraform-plugin-sdk to be muxable, or + // we wouldn't even be including it in this type. Don't use it. It may + // go away or change behavior on you with no warning. It is + // explicitly unsupported and not part of our SemVer guarantees. + // + // Deprecated: Really, just don't use this, you don't need it. + UnsafeToUseLegacyTypeSystem bool +} + +// ImportResourceStateRequest is the request Terraform sends when it wants a +// provider to import one or more resources specified by an ID. +type ImportResourceStateRequest struct { + // TypeName is the type of resource Terraform wants to import. + TypeName string + + // ID is the user-supplied identifying information about the resource + // or resources. Providers decide and communicate to users the format + // for the ID, and use it to determine what resource or resources to + // import. + ID string +} + +// ImportResourceStateResponse is the response from the provider about the +// imported resources. +type ImportResourceStateResponse struct { + // ImportedResources are the resources the provider found and was able + // to import. + ImportedResources []*ImportedResource + + // Diagnostics report errors or warnings related to importing the + // requested resource or resources. Returning an empty slice indicates + // a successful validation with no warnings or errors generated. + Diagnostics []*Diagnostic +} + +// ImportedResource represents a single resource that a provider has +// successfully imported into state. 
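As the response types above show, errors and warnings travel back to Terraform as Diagnostics rather than Go errors. A hedged sketch, continuing the hypothetical exampleProvider from the provider-configuration example earlier, of an ImportResourceState implementation that declines to import:

// Illustrative only; exampleProvider is hypothetical and imports match the
// earlier sketch.
func (p *exampleProvider) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) {
	return &tfprotov5.ImportResourceStateResponse{
		Diagnostics: []*tfprotov5.Diagnostic{{
			Severity: tfprotov5.DiagnosticSeverityError,
			Summary:  "Import not supported",
			Detail:   "The " + req.TypeName + " resource does not support import.",
		}},
	}, nil
}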
+type ImportedResource struct { + // TypeName is the type of resource that was imported. + TypeName string + + // State is the provider's understanding of the imported resource's + // state, represented as a `DynamicValue`. See the documentation for + // `DynamicValue` for information about safely creating the + // `DynamicValue`. + // + // The state should be represented as a tftypes.Object, with each + // attribute and nested block getting its own key and value. + State *DynamicValue + + // Private should be set to any state that the provider would like sent + // with requests for this resource. This state will be associated with + // the resource, but will not be considered when calculating diffs. + Private []byte +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go new file mode 100644 index 00000000..a2c7426c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/schema.go @@ -0,0 +1,223 @@ +package tfprotov5 + +import "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + +const ( + // SchemaNestedBlockNestingModeInvalid indicates that the nesting mode + // for a nested block in the schema is invalid. This generally + // indicates a nested block that was created incorrectly. + SchemaNestedBlockNestingModeInvalid SchemaNestedBlockNestingMode = 0 + + // SchemaNestedBlockNestingModeSingle indicates that the nested block + // should be treated as a single block with no labels, and there should + // not be more than one of these blocks in the containing block. The + // block will appear in config and state values as a tftypes.Object. + SchemaNestedBlockNestingModeSingle SchemaNestedBlockNestingMode = 1 + + // SchemaNestedBlockNestingModeList indicates that multiple instances + // of the nested block should be permitted, with no labels, and that + // the instances of the block should appear in config and state values + // as a tftypes.List, with an ElementType of tftypes.Object. + SchemaNestedBlockNestingModeList SchemaNestedBlockNestingMode = 2 + + // SchemaNestedBlockNestingModeSet indicates that multiple instances + // of the nested block should be permitted, with no labels, and that + // the instances of the block should appear in config and state values + // as a tftypes.Set, with an ElementType of tftypes.Object. + SchemaNestedBlockNestingModeSet SchemaNestedBlockNestingMode = 3 + + // SchemaNestedBlockNestingModeMap indicates that multiple instances of + // the nested block should be permitted, each with a single label, and + // that they should be represented in state and config values as a + // tftypes.Map, with an AttributeType of tftypes.Object. The labels on + // the blocks will be used as the map keys. It is an error, therefore, + // to use the same label value on multiple block instances. + SchemaNestedBlockNestingModeMap SchemaNestedBlockNestingMode = 4 + + // SchemaNestedBlockNestingModeGroup indicates that the nested block + // should be treated as a single block with no labels, and there should + // not be more than one of these blocks in the containing block. The + // block will appear in config and state values as a tftypes.Object. + // + // SchemaNestedBlockNestingModeGroup is distinct from + // SchemaNestedBlockNestingModeSingle in that it guarantees that the + // block will never be null. If it is omitted from a config, the block + // will still be set, but its attributes and nested blocks will all be + // null. 
This is an exception to the rule that any block not set in the + // configuration cannot be set in config by the provider; this ensures + // the block is always considered "set" in the configuration, and is + // therefore settable in state by the provider. + SchemaNestedBlockNestingModeGroup SchemaNestedBlockNestingMode = 5 +) + +// Schema is how Terraform defines the shape of data. It can be thought of as +// the type information for resources, data sources, provider configuration, +// and all the other data that Terraform sends to providers. It is how +// providers express their requirements for that data. +type Schema struct { + // Version indicates which version of the schema this is. Versions + // should be monotonically incrementing numbers. When Terraform + // encounters a resource stored in state with a schema version lower + // that the schema version the provider advertises for that resource, + // Terraform requests the provider upgrade the resource's state. + Version int64 + + // Block is the root level of the schema, the collection of attributes + // and blocks that make up a resource, data source, provider, or other + // configuration block. + Block *SchemaBlock +} + +// SchemaBlock represents a block in a schema. Blocks are how Terraform creates +// groupings of attributes. In configurations, they don't use the equals sign +// and use dynamic instead of list comprehensions. +// +// Blocks will show up in state and config Values as a tftypes.Object, with the +// attributes and nested blocks defining the tftypes.Object's AttributeTypes. +type SchemaBlock struct { + // TODO: why do we have version in the block, too? + Version int64 + + // Attributes are the attributes defined within the block. These are + // the fields that users can set using the equals sign or reference in + // interpolations. + Attributes []*SchemaAttribute + + // BlockTypes are the nested blocks within the block. These are used to + // have blocks within blocks. + BlockTypes []*SchemaNestedBlock + + // Description offers an end-user friendly description of what the + // block is for. This will be surfaced to users through editor + // integrations, documentation generation, and other settings. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Deprecated, when set to true, indicates that a block should no + // longer be used and users should migrate away from it. At the moment + // it is unused and will have no impact, but it will be used in future + // tooling that is powered by provider schemas to enable richer user + // experiences. Providers should set it when deprecating blocks in + // preparation for these tools. + Deprecated bool +} + +// SchemaAttribute represents a single attribute within a schema block. +// Attributes are the fields users can set in configuration using the equals +// sign, can assign to variables, can interpolate, and can use list +// comprehensions on. +type SchemaAttribute struct { + // Name is the name of the attribute. This is what the user will put + // before the equals sign to assign a value to this attribute. + Name string + + // Type indicates the type of data the attribute expects. See the + // documentation for the tftypes package for information on what types + // are supported and their behaviors. + Type tftypes.Type + + // Description offers an end-user friendly description of what the + // attribute is for. 
This will be surfaced to users through editor + // integrations, documentation generation, and other settings. + Description string + + // Required, when set to true, indicates that this attribute must have + // a value assigned to it by the user or Terraform will throw an error. + Required bool + + // Optional, when set to true, indicates that the user does not need to + // supply a value for this attribute, but may. + Optional bool + + // Computed, when set to true, indicates the the provider will supply a + // value for this field. If Optional and Required are false and + // Computed is true, the user will not be able to specify a value for + // this field without Terraform throwing an error. If Optional is true + // and Computed is true, the user can specify a value for this field, + // but the provider may supply a value if the user does not. It is + // always a violation of Terraform's protocol to substitute a value for + // what the user entered, even if Computed is true. + Computed bool + + // Sensitive, when set to true, indicates that the contents of this + // attribute should be considered sensitive and not included in output. + // This does not encrypt or otherwise protect these values in state, it + // only offers protection from them showing up in plans or other + // output. + Sensitive bool + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Deprecated, when set to true, indicates that a attribute should no + // longer be used and users should migrate away from it. At the moment + // it is unused and will have no impact, but it will be used in future + // tooling that is powered by provider schemas to enable richer user + // experiences. Providers should set it when deprecating attributes in + // preparation for these tools. + Deprecated bool +} + +// SchemaNestedBlock is a nested block within another block. See SchemaBlock +// for more information on blocks. +type SchemaNestedBlock struct { + // TypeName is the name of the block. It is what the user will specify + // when using the block in configuration. + TypeName string + + // Block is the block being nested inside another block. See the + // SchemaBlock documentation for more information on blocks. + Block *SchemaBlock + + // Nesting is the kind of nesting the block is using. Different nesting + // modes have different behaviors and imply different kinds of data. + Nesting SchemaNestedBlockNestingMode + + // MinItems is the minimum number of instances of this block that a + // user must specify or Terraform will return an error. + // + // MinItems can only be set for SchemaNestedBlockNestingModeList and + // SchemaNestedBlockNestingModeSet. SchemaNestedBlockNestingModeSingle + // can also set MinItems and MaxItems both to 1 to indicate that the + // block is required to be set. All other SchemaNestedBlockNestingModes + // must leave MinItems set to 0. + MinItems int64 + + // MaxItems is the maximum number of instances of this block that a + // user may specify before Terraform returns an error. + // + // MaxItems can only be set for SchemaNestedBlockNestingModeList and + // SchemaNestedBlockNestingModeSet. SchemaNestedBlockNestingModeSingle + // can also set MinItems and MaxItems both to 1 to indicate that the + // block is required to be set. All other SchemaNestedBlockNestingModes + // must leave MaxItems set to 0. + MaxItems int64 +} + +// SchemaNestedBlockNestingMode indicates the nesting mode for +// SchemaNestedBlocks. 
The nesting mode determines the number of instances of +// the block allowed, how many labels the block expects, and the data structure +// used for the block in config and state values. +type SchemaNestedBlockNestingMode int32 + +func (s SchemaNestedBlockNestingMode) String() string { + switch s { + case 0: + return "INVALID" + case 1: + return "SINGLE" + case 2: + return "LIST" + case 3: + return "SET" + case 4: + return "MAP" + case 5: + return "GROUP" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/doc.go new file mode 100644 index 00000000..0d545c2b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/doc.go @@ -0,0 +1,6 @@ +// Package tf5server implementations a server implementation for running +// tfprotov5.ProviderServers as gRPC servers. +// +// Providers will likely be calling tf5server.Serve from their main function to +// start the server so Terraform can connect to it. +package tf5server diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/plugin.go new file mode 100644 index 00000000..efa32947 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/plugin.go @@ -0,0 +1,45 @@ +package tf5server + +import ( + "context" + "errors" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "google.golang.org/grpc" +) + +// GRPCProviderPlugin is an implementation of the +// github.com/hashicorp/go-plugin#Plugin and +// github.com/hashicorp/go-plugin#GRPCPlugin interfaces, indicating how to +// serve tfprotov5.ProviderServers as gRPC plugins for go-plugin. +type GRPCProviderPlugin struct { + GRPCProvider func() tfprotov5.ProviderServer +} + +// Server always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. +func (p *GRPCProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// Client always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. +func (p *GRPCProviderPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// GRPCClient always returns an error; we're only implementing the server half +// of the interface. +func (p *GRPCProviderPlugin) GRPCClient(context.Context, *plugin.GRPCBroker, *grpc.ClientConn) (interface{}, error) { + return nil, errors.New("terraform-plugin-go only implements gRPC servers") +} + +// GRPCServer registers the gRPC provider server with the gRPC server that +// go-plugin is standing up. 
+func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + tfplugin5.RegisterProviderServer(s, New(p.GRPCProvider())) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go new file mode 100644 index 00000000..34da72a4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go @@ -0,0 +1,358 @@ +package tf5server + +import ( + "context" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto" +) + +// ServeOpt is an interface for defining options that can be passed to the +// Serve function. Each implementation modifies the ServeConfig being +// generated. A slice of ServeOpts then, cumulatively applied, render a full +// ServeConfig. +type ServeOpt interface { + ApplyServeOpt(*ServeConfig) error +} + +// ServeConfig contains the configured options for how a provider should be +// served. +type ServeConfig struct { + logger hclog.Logger + debugCtx context.Context + debugCh chan *plugin.ReattachConfig + debugCloseCh chan struct{} +} + +type serveConfigFunc func(*ServeConfig) error + +func (s serveConfigFunc) ApplyServeOpt(in *ServeConfig) error { + return s(in) +} + +// WithDebug returns a ServeOpt that will set the server into debug mode, using +// the passed options to populate the go-plugin ServeTestConfig. +func WithDebug(ctx context.Context, config chan *plugin.ReattachConfig, closeCh chan struct{}) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.debugCtx = ctx + in.debugCh = config + in.debugCloseCh = closeCh + return nil + }) +} + +// WithGoPluginLogger returns a ServeOpt that will set the logger that +// go-plugin should use to log messages. +func WithGoPluginLogger(logger hclog.Logger) ServeOpt { + return serveConfigFunc(func(in *ServeConfig) error { + in.logger = logger + return nil + }) +} + +// Serve starts a tfprotov5.ProviderServer serving, ready for Terraform to +// connect to it. The name passed in should be the fully qualified name that +// users will enter in the source field of the required_providers block, like +// "registry.terraform.io/hashicorp/time". +// +// Zero or more options to configure the server may also be passed. The default +// invocation is sufficient, but if the provider wants to run in debug mode or +// modify the logger that go-plugin is using, ServeOpts can be specified to +// support that. 
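The doc comment above describes the typical call site for Serve. A sketch of a provider binary's entry point; the registry address and the newExampleProvider constructor are placeholders, not part of this change:

package main

import (
	"log"

	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
	tf5server "github.com/hashicorp/terraform-plugin-go/tfprotov5/server"
)

func main() {
	// newExampleProvider is a hypothetical constructor returning a value that
	// implements tfprotov5.ProviderServer.
	err := tf5server.Serve("registry.terraform.io/example/example", func() tfprotov5.ProviderServer {
		return newExampleProvider()
	})
	if err != nil {
		log.Fatal(err)
	}
}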
+func Serve(name string, serverFactory func() tfprotov5.ProviderServer, opts ...ServeOpt) error { + var conf ServeConfig + for _, opt := range opts { + err := opt.ApplyServeOpt(&conf) + if err != nil { + return err + } + } + serveConfig := &plugin.ServeConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 5, + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", + }, + Plugins: plugin.PluginSet{ + "provider": &GRPCProviderPlugin{ + GRPCProvider: serverFactory, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + } + if conf.logger != nil { + serveConfig.Logger = conf.logger + } + if conf.debugCh != nil { + serveConfig.Test = &plugin.ServeTestConfig{ + Context: conf.debugCtx, + ReattachConfigCh: conf.debugCh, + CloseCh: conf.debugCloseCh, + } + } + plugin.Serve(serveConfig) + return nil +} + +type server struct { + downstream tfprotov5.ProviderServer + tfplugin5.UnimplementedProviderServer + + stopMu sync.Mutex + stopCh chan struct{} +} + +func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { + select { + case <-ctx.Done(): + return + case <-stopCh: + cancel() + } +} + +// stoppableContext returns a context that wraps `ctx` but will be canceled +// when the server's stopCh is closed. +// +// This is used to cancel all in-flight contexts when the Stop method of the +// server is called. +func (s *server) stoppableContext(ctx context.Context) context.Context { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + go mergeStop(stoppable, cancel, s.stopCh) + return stoppable +} + +// New converts a tfprotov5.ProviderServer into a server capable of handling +// Terraform protocol requests and issuing responses using the gRPC types. +func New(serve tfprotov5.ProviderServer) tfplugin5.ProviderServer { + return &server{ + downstream: serve, + stopCh: make(chan struct{}), + } +} + +func (s *server) GetSchema(ctx context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.GetProviderSchemaRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.GetProviderSchema(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.GetProviderSchema_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) PrepareProviderConfig(ctx context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.PrepareProviderConfigRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.PrepareProviderConfig(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.PrepareProviderConfig_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) Configure(ctx context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ConfigureProviderRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ConfigureProvider(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.Configure_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +// stop closes the stopCh associated with the server and replaces it with a new +// one. 
+// +// This causes all in-flight requests for the server to have their contexts +// canceled. +func (s *server) stop() { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + close(s.stopCh) + s.stopCh = make(chan struct{}) +} + +func (s *server) Stop(ctx context.Context, req *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.StopProviderRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.StopProvider(ctx, r) + if err != nil { + return nil, err + } + s.stop() + ret, err := toproto.Stop_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) ValidateDataSourceConfig(ctx context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ValidateDataSourceConfigRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ValidateDataSourceConfig(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.ValidateDataSourceConfig_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) ReadDataSource(ctx context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ReadDataSourceRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ReadDataSource(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.ReadDataSource_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) ValidateResourceTypeConfig(ctx context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ValidateResourceTypeConfigRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ValidateResourceTypeConfig(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.ValidateResourceTypeConfig_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) UpgradeResourceState(ctx context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.UpgradeResourceStateRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.UpgradeResourceState(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.UpgradeResourceState_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) ReadResource(ctx context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ReadResourceRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ReadResource(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.ReadResource_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) PlanResourceChange(ctx context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.PlanResourceChangeRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.PlanResourceChange(ctx, r) + if err != nil { + return nil, err + } + ret, err := 
toproto.PlanResourceChange_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) ApplyResourceChange(ctx context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ApplyResourceChangeRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ApplyResourceChange(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.ApplyResourceChange_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} + +func (s *server) ImportResourceState(ctx context.Context, req *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) { + ctx = s.stoppableContext(ctx) + r, err := fromproto.ImportResourceStateRequest(req) + if err != nil { + return nil, err + } + resp, err := s.downstream.ImportResourceState(ctx, r) + if err != nil { + return nil, err + } + ret, err := toproto.ImportResourceState_Response(resp) + if err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go new file mode 100644 index 00000000..d66ed5c5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go @@ -0,0 +1,79 @@ +package tfprotov5 + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +// ErrUnknownRawStateType is returned when a RawState has no Flatmap or JSON +// bytes set. This should never be returned during the normal operation of a +// provider, and indicates one of the following: +// +// 1. terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terrafrom-plugin-go has a bug. +// +// 3. The `RawState` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownRawStateType = errors.New("RawState had no JSON or flatmap data set") + +// RawState is the raw, undecoded state for providers to upgrade. It is +// undecoded as Terraform, for whatever reason, doesn't have the previous +// schema available to it, and so cannot decode the state itself and pushes +// that responsibility off onto providers. +// +// It is safe to assume that Flatmap can be ignored for any state written by +// Terraform 0.12.0 or higher, but it is not safe to assume that all states +// written by 0.12.0 or higher will be in JSON format; future versions may +// switch to an alternate encoding for states. +type RawState struct { + JSON []byte + Flatmap map[string]string +} + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the RawState in an easy-to-interact-with way. It is the +// main purpose of the RawState type, and is how provider developers should +// obtain state values from the UpgradeResourceState RPC call. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same. Objects and maps all look the same, as well, as do +// user-specified values when DynamicPseudoType is used in the schema. +// Fortunately, the provider should already know the type; it should be the +// type of the schema, or DynamicPseudoType if that's what's in the schema. 
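Following the guidance above, the provider passes the type it already knows from its schema when decoding raw state. A minimal sketch of that call inside an UpgradeResourceState implementation; exampleProvider and the surrounding upgrade logic are hypothetical:

func (p *exampleProvider) priorStateValue(req *tfprotov5.UpgradeResourceStateRequest, schemaType tftypes.Type) (tftypes.Value, error) {
	// Unmarshal decodes the stored state as the type derived from the
	// provider's schema (or tftypes.DynamicPseudoType where the schema says
	// so). Upgrade logic would then adjust the returned Value to match the
	// latest schema version before building the response.
	return req.RawState.Unmarshal(schemaType)
}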
+// `Unmarshal` will then parse the value as though it belongs to that type, if +// possible, and return a `tftypes.Value` with the appropriate information. If +// the data can't be interpreted as that type, an error will be returned saying +// so. In these cases, double check to make sure the schema is declaring the +// same type being passed into `Unmarshal`. +// +// In the event an ErrUnknownRawStateType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `RawState` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `RawState`s received +// from RPC requests. +// +// State files written before Terraform 0.12 that haven't been upgraded yet +// cannot be unmarshaled, and must have their Flatmap property read directly. +func (s RawState) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if s.JSON != nil { + return jsonUnmarshal(s.JSON, typ, tftypes.AttributePath{}) + } + if s.Flatmap != nil { + return tftypes.Value{}, fmt.Errorf("flatmap states cannot be unmarshaled, only states written by Terraform 0.12 and higher can be unmarshaled") + } + return tftypes.Value{}, ErrUnknownRawStateType +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/string_kind.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/string_kind.go new file mode 100644 index 00000000..35d89d03 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/string_kind.go @@ -0,0 +1,25 @@ +package tfprotov5 + +const ( + // StringKindPlain indicates a string is plaintext, and should be + // interpreted as having no formatting information. + StringKindPlain StringKind = 0 + + // StringKindMarkdown indicates a string is markdown-formatted, and + // should be rendered using a Markdown renderer to correctly display + // its formatting. + StringKindMarkdown StringKind = 1 +) + +// StringKind indicates a formatting or encoding scheme for a string. +type StringKind int32 + +func (s StringKind) String() string { + switch s { + case 0: + return "PLAIN" + case 1: + return "MARKDOWN" + } + return "UNKNOWN" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/attribute_path.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/attribute_path.go new file mode 100644 index 00000000..90fb9c32 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/attribute_path.go @@ -0,0 +1,211 @@ +package tftypes + +import ( + "errors" + "fmt" +) + +var ( + // ErrNotAttributePathStepper is returned when a type that doesn't full + // the AttributePathStepper interface is passed to WalkAttributePath. + ErrNotAttributePathStepper = errors.New("doesn't fill tftypes.AttributePathStepper interface") + + // ErrInvalidStep is returned when an AttributePath has the wrong kind + // of AttributePathStep for the type that WalkAttributePath is + // operating on. + ErrInvalidStep = errors.New("step cannot be applied to this value") +) + +// AttributePath is a type that can point to a specific value within an +// aggregate Terraform value. 
It consists of steps, each identifying one +// element or attribute of the current value, and making that the current +// value. This allows referring to arbitrarily precise values. +type AttributePath struct { + // Steps are the steps that must be followed from the root of the value + // to obtain the value being indicated. + Steps []AttributePathStep +} + +// NewErrorf returns an error associated with the value indicated by `a`. This +// is equivalent to calling a.NewError(fmt.Errorf(f, args...)). +func (a AttributePath) NewErrorf(f string, args ...interface{}) error { + return attributePathError{ + error: fmt.Errorf(f, args...), + path: a, + } +} + +// NewError returns an error that associates `err` with the value indicated by +// `a`. +func (a AttributePath) NewError(err error) error { + return attributePathError{ + error: err, + path: a, + } +} + +// WithAttributeName adds an AttributeName step to `a`, using `name` as the +// attribute's name. +func (a *AttributePath) WithAttributeName(name string) { + a.Steps = append(a.Steps, AttributeName(name)) +} + +// WithElementKeyString adds an ElementKeyString step to `a`, using `key` as +// the element's key. +func (a *AttributePath) WithElementKeyString(key string) { + a.Steps = append(a.Steps, ElementKeyString(key)) +} + +// WithElementKeyInt adds an ElementKeyInt step to `a`, using `key` as the +// element's key. +func (a *AttributePath) WithElementKeyInt(key int64) { + a.Steps = append(a.Steps, ElementKeyInt(key)) +} + +// WithElementKeyValue adds an ElementKeyValue to `a`, using `key` as the +// element's key. +func (a *AttributePath) WithElementKeyValue(key Value) { + a.Steps = append(a.Steps, ElementKeyValue(key)) +} + +// WithoutLastStep removes the last step, whatever kind of step it was, from +// `a`. +func (a *AttributePath) WithoutLastStep() { + a.Steps = a.Steps[:len(a.Steps)-1] +} + +// AttributePathStep is an intentionally unimplementable interface that +// functions as an enum, allowing us to use different strongly-typed step types +// as a generic "step" type. +// +// An AttributePathStep is meant to indicate a single step in an AttributePath, +// indicating a specific attribute or element that is the next value in the +// path. +type AttributePathStep interface { + unfulfillable() // make this interface fillable only by this package +} + +var ( + _ AttributePathStep = AttributeName("") + _ AttributePathStep = ElementKeyString("") + _ AttributePathStep = ElementKeyInt(0) +) + +// AttributeName is an AttributePathStep implementation that indicates the next +// step in the AttributePath is to select an attribute. The value of the +// AttributeName is the name of the attribute to be selected. +type AttributeName string + +func (a AttributeName) unfulfillable() {} + +// ElementKeyString is an AttributePathStep implementation that indicates the +// next step in the AttributePath is to select an element using a string key. +// The value of the ElementKeyString is the key of the element to select. +type ElementKeyString string + +func (e ElementKeyString) unfulfillable() {} + +// ElementKeyInt is an AttributePathStep implementation that indicates the next +// step in the AttributePath is to select an element using an int64 key. The +// value of the ElementKeyInt is the key of the element to select. 
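The With* helpers above build a path one step at a time. A small sketch of constructing a path that could be attached to a Diagnostic or returned in PlanResourceChangeResponse.RequiresReplace; the attribute names are placeholders:

func exampleReplacePath() *tftypes.AttributePath {
	p := &tftypes.AttributePath{}
	p.WithAttributeName("network")    // hypothetical list attribute
	p.WithElementKeyInt(0)            // first element of that list
	p.WithAttributeName("cidr_block") // the field whose change forces replacement
	return p
}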
+type ElementKeyInt int64 + +func (e ElementKeyInt) unfulfillable() {} + +// ElementKeyValue is an AttributePathStep implementation that indicates the +// next step in the AttributePath is to select an element using the element +// itself as a key. The value of the ElementKeyValue is the key of the element +// to select. +type ElementKeyValue Value + +func (e ElementKeyValue) unfulfillable() {} + +// AttributePathStepper is an interface that types can implement to make them +// traversable by WalkAttributePath, allowing providers to retrieve the +// specific value an AttributePath is pointing to. +type AttributePathStepper interface { + // Return the attribute or element the AttributePathStep is referring + // to, or an error if the AttributePathStep is referring to an + // attribute or element that doesn't exist. + ApplyTerraform5AttributePathStep(AttributePathStep) (interface{}, error) +} + +// WalkAttributePath will return the value that `path` is pointing to, using +// `in` as the root. If an error is returned, the AttributePath returned will +// indicate the steps that remained to be applied when the error was +// encountered. +// +// map[string]interface{} and []interface{} types have built-in support. Other +// types need to use the AttributePathStepper interface to tell +// WalkAttributePath how to traverse themselves. +func WalkAttributePath(in interface{}, path AttributePath) (interface{}, AttributePath, error) { + if len(path.Steps) < 1 { + return in, path, nil + } + stepper, ok := in.(AttributePathStepper) + if !ok { + stepper, ok = builtinAttributePathStepper(in) + if !ok { + return in, path, ErrNotAttributePathStepper + } + } + next, err := stepper.ApplyTerraform5AttributePathStep(path.Steps[0]) + if err != nil { + return in, path, err + } + path.Steps = path.Steps[1:] + return WalkAttributePath(next, path) +} + +func builtinAttributePathStepper(in interface{}) (AttributePathStepper, bool) { + switch v := in.(type) { + case map[string]interface{}: + return mapStringInterfaceAttributePathStepper(v), true + case []interface{}: + return interfaceSliceAttributePathStepper(v), true + default: + return nil, false + } +} + +type mapStringInterfaceAttributePathStepper map[string]interface{} + +func (m mapStringInterfaceAttributePathStepper) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + _, isAttributeName := step.(AttributeName) + _, isElementKeyString := step.(ElementKeyString) + if !isAttributeName && !isElementKeyString { + return nil, ErrInvalidStep + } + var stepValue string + if isAttributeName { + stepValue = string(step.(AttributeName)) + } + if isElementKeyString { + stepValue = string(step.(ElementKeyString)) + } + v, ok := m[stepValue] + if !ok { + return nil, ErrInvalidStep + } + return v, nil +} + +type interfaceSliceAttributePathStepper []interface{} + +func (i interfaceSliceAttributePathStepper) ApplyTerraform5AttributePathStep(step AttributePathStep) (interface{}, error) { + eki, isElementKeyInt := step.(ElementKeyInt) + if !isElementKeyInt { + return nil, ErrInvalidStep + } + // slices can only have items up to the max value of int + // but we get ElementKeyInt as an int64 + // we keep ElementKeyInt as an int64 and cast the length of the slice + // to int64 here because if ElementKeyInt is greater than the max value + // of int, we will always (correctly) error out here. 
This lets us + // confidently cast ElementKeyInt to an int below, knowing we're not + // truncating data + if int64(eki) >= int64(len(i)) { + return nil, ErrInvalidStep + } + return i[int(eki)], nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/attribute_path_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/attribute_path_error.go new file mode 100644 index 00000000..b2d8bbd0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/attribute_path_error.go @@ -0,0 +1,6 @@ +package tftypes + +type attributePathError struct { + path AttributePath + error +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/doc.go new file mode 100644 index 00000000..414a797d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/doc.go @@ -0,0 +1,57 @@ +// Package tftypes provides a type system for Terraform configuration and state +// values. +// +// Terraform's configuration and state values are stored using a collection of +// types. There are primitive types, such as strings, numbers, and booleans, +// but there are also aggregate types such as lists, sets, tuples, maps, and +// objects, which consist of multiple values of primitive types aggregated into +// a single value. There is also a dynamic pseudo-type that represents an +// unknown type. It is useful for indicating that any type of data is +// acceptable. +// +// Terraform's values map neatly onto either primitives built into Go or types +// in the Go standard library, with one exception. Terraform has the concept of +// unknown values, values that may or may not be set at a future date. These +// are distinct from null values, which indicate a value that is known to not +// be set, and are mostly encountered when a user has interpolated a computed +// field into another field; the field that is interpolated into has an unknown +// value, because the field being interpolated won't have its value known until +// apply time. +// +// To address this, the tftypes package wraps all values in a special Value +// type. This Value type is capable of holding known and unknown values, +// interrogating whether the value is known or not, and accessing the concrete +// value that Terraform sent in the cases where the value is known. A common +// pattern is to use the Value.IsKnown() method to confirm that a value is +// known, then to use the Value.As() method to retrieve the underlying data for +// use. +// +// When using the Value.As() method, certain types have built-in behavior to +// support using them as destinations for converted data: +// +// * String values can be converted into strings +// +// * Number values can be converted into *big.Floats +// +// * Boolean values can be converted into bools +// +// * List, Set, and Tuple values can be converted into a slice of Values +// +// * Map and Object values can be converted into a map with string keys and +// Value values. +// +// These defaults were chosen because they're capable of losslessly +// representing all possible values for their Terraform type, with the +// exception of null values. Converting into pointer versions of any of these +// types will correctly surface null values as well. 
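A short sketch of the IsKnown/As pattern the package documentation describes, converting a string Value into a Go string; the function name is invented for illustration:

func nameFromValue(v tftypes.Value) (string, error) {
	// Unknown values (for example, unresolved interpolations during plan)
	// can't be converted; callers decide how to handle that case.
	if !v.IsKnown() {
		return "", nil
	}
	var name string
	if err := v.As(&name); err != nil {
		return "", err
	}
	return name, nil
}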
+// +// Custom, provider-defined types can define their own conversion logic that +// will be respected by Value.As(), as well, by implementing the +// FromTerraform5Value method for that type. The FromTerraform5Value method +// accepts a Value as an argument and returns an error. The Value passed in +// will be the same Value that Value.As() was called on. The recommended +// implementation of the FromTerraform5Value method is to call Value.As() on +// the passed Value, converting it into one of the built-in types above, and +// then performing whatever type casting or conversion logic is required to +// assign the data to the provider-supplied type. +package tftypes diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/list.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/list.go new file mode 100644 index 00000000..266327c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/list.go @@ -0,0 +1,42 @@ +package tftypes + +import "fmt" + +// List is a Terraform type representing an ordered collection of elements, all +// of the same type. +type List struct { + ElementType Type +} + +// Is returns whether `t` is a List type or not. If `t` is an instance of the +// List type and its ElementType property is nil, it will return true. If `t`'s +// ElementType property is not nil, it will only return true if its ElementType +// is considered the same type as `l`'s ElementType. +func (l List) Is(t Type) bool { + v, ok := t.(List) + if !ok { + return false + } + if v.ElementType != nil { + return l.ElementType.Is(v.ElementType) + } + return ok +} + +func (l List) String() string { + return "tftypes.List" +} + +func (l List) private() {} + +// MarshalJSON returns a JSON representation of the full type signature of `l`, +// including its ElementType. +// +// Deprecated: this is not meant to be called by third-party code. +func (l List) MarshalJSON() ([]byte, error) { + elementType, err := l.ElementType.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("error marshaling tftypes.List's element type %T to JSON: %w", l.ElementType, err) + } + return []byte(`["list",` + string(elementType) + `]`), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/map.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/map.go new file mode 100644 index 00000000..73d4d042 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/map.go @@ -0,0 +1,41 @@ +package tftypes + +import "fmt" + +// Map is a Terraform type representing an unordered collection of elements, +// all of the same type, each identifiable with a unique string key. +type Map struct { + AttributeType Type +} + +// Is returns whether `t` is a Map type or not. If `t` is an instance of the +// Map type and its AttributeType property is not nil, it will only return true +// if its AttributeType is considered the same type as `m`'s AttributeType. +func (m Map) Is(t Type) bool { + v, ok := t.(Map) + if !ok { + return false + } + if v.AttributeType != nil { + return m.AttributeType.Is(v.AttributeType) + } + return ok +} + +func (m Map) String() string { + return "tftypes.Map" +} + +func (m Map) private() {} + +// MarshalJSON returns a JSON representation of the full type signature of `m`, +// including its AttributeType. +// +// Deprecated: this is not meant to be called by third-party code. 
+func (m Map) MarshalJSON() ([]byte, error) { + attributeType, err := m.AttributeType.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("error marshaling tftypes.Map's attribute type %T to JSON: %w", m.AttributeType, err) + } + return []byte(`["map",` + string(attributeType) + `]`), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/object.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/object.go new file mode 100644 index 00000000..7b18c9ec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/object.go @@ -0,0 +1,56 @@ +package tftypes + +import "encoding/json" + +// Object is a Terraform type representing an unordered collection of +// attributes, potentially of differing types, each identifiable with a unique +// string name. The number of attributes, their names, and their types are part +// of the type signature for the Object, and so two Objects with different +// attribute names or types are considered to be distinct types. +type Object struct { + AttributeTypes map[string]Type +} + +// Is returns whether `t` is an Object type or not. If `t` is an instance of +// the Object type and its AttributeTypes property is not nil, it will only +// return true the AttributeTypes are considered the same. To be considered +// equal, the same set of keys must be present in each, and each key's value +// needs to be considered the same type between the two Objects. +func (o Object) Is(t Type) bool { + v, ok := t.(Object) + if !ok { + return false + } + if v.AttributeTypes != nil { + if len(o.AttributeTypes) != len(v.AttributeTypes) { + return false + } + for k, typ := range o.AttributeTypes { + if _, ok := v.AttributeTypes[k]; !ok { + return false + } + if !typ.Is(v.AttributeTypes[k]) { + return false + } + } + } + return ok +} + +func (o Object) String() string { + return "tftypes.Object" +} + +func (o Object) private() {} + +// MarshalJSON returns a JSON representation of the full type signature of `o`, +// including the AttributeTypes. +// +// Deprecated: this is not meant to be called by third-party code. +func (o Object) MarshalJSON() ([]byte, error) { + attrs, err := json.Marshal(o.AttributeTypes) + if err != nil { + return nil, err + } + return []byte(`["object",` + string(attrs) + `]`), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/primitive.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/primitive.go new file mode 100644 index 00000000..39da56f3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/primitive.go @@ -0,0 +1,56 @@ +package tftypes + +import "fmt" + +const ( + // DynamicPseudoType is a pseudo-type in Terraform's type system that + // is used as a wildcard type. It indicates that any Terraform type can + // be used. + DynamicPseudoType = primitive("DynamicPseudoType") + + // String is a primitive type in Terraform that represents a UTF-8 + // string of bytes. + String = primitive("String") + + // Number is a primitive type in Terraform that represents a real + // number. + Number = primitive("Number") + + // Bool is a primitive type in Terraform that represents a true or + // false boolean value. + Bool = primitive("Bool") +) + +var ( + _ Type = primitive("test") +) + +type primitive string + +func (p primitive) Is(t Type) bool { + v, ok := t.(primitive) + if !ok { + return false + } + return p == v +} + +func (p primitive) String() string { + return "tftypes." 
+ string(p) +} + +func (p primitive) private() {} + +func (p primitive) MarshalJSON() ([]byte, error) { + switch p { + case String: + return []byte(`"string"`), nil + case Number: + return []byte(`"number"`), nil + case Bool: + return []byte(`"bool"`), nil + case DynamicPseudoType: + return []byte(`"dynamic"`), nil + } + return nil, fmt.Errorf("unknown primitive type %q", p) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/set.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/set.go new file mode 100644 index 00000000..8ebdc96a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/set.go @@ -0,0 +1,42 @@ +package tftypes + +import "fmt" + +// Set is a Terraform type representing an unordered collection of unique +// elements, all of the same type. +type Set struct { + ElementType Type +} + +// Is returns whether `t` is a Set type or not. If `t` is an instance of the +// Set type and its ElementType property is nil, it will return true. If `t`'s +// ElementType property is not nil, it will only return true if its ElementType +// is considered the same type as `s`'s ElementType. +func (s Set) Is(t Type) bool { + v, ok := t.(Set) + if !ok { + return false + } + if v.ElementType != nil { + return s.ElementType.Is(v.ElementType) + } + return ok +} + +func (s Set) String() string { + return "tftypes.Set" +} + +func (s Set) private() {} + +// MarshalJSON returns a JSON representation of the full type signature of `s`, +// including its ElementType. +// +// Deprecated: this is not meant to be called by third-party code. +func (s Set) MarshalJSON() ([]byte, error) { + elementType, err := s.ElementType.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("error marshaling tftypes.Set's element type %T to JSON: %w", s.ElementType, err) + } + return []byte(`["set",` + string(elementType) + `]`), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/tuple.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/tuple.go new file mode 100644 index 00000000..2603729b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/tuple.go @@ -0,0 +1,53 @@ +package tftypes + +import "encoding/json" + +// Tuple is a Terraform type representing an ordered collection of elements, +// potentially of differing types. The number of elements and their types are +// part of the type signature for the Tuple, and so two Tuples with different +// numbers or types of elements are considered to be distinct types. +type Tuple struct { + ElementTypes []Type +} + +// Is returns whether `t` is a Tuple type or not. If `t` is an instance of the +// Tuple type and its ElementTypes property is not nil, it will only return +// true if the ElementTypes are considered the same. To be considered the same, +// there must be the same number of ElementTypes, arranged in the same order, +// and the types in each position must be considered the same as the type in +// the same position in the other Tuple. 
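//
// A minimal sketch of that comparison, assuming both tuple types are fully
// specified:
//
//     a := Tuple{ElementTypes: []Type{String, Number}}
//     b := Tuple{ElementTypes: []Type{String, Number}}
//     c := Tuple{ElementTypes: []Type{Number, String}}
//     _ = a.Is(b) // true: same length, same element types in the same order
//     _ = c.Is(a) // false: the element types differ by position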
+func (tu Tuple) Is(t Type) bool { + v, ok := t.(Tuple) + if !ok { + return false + } + if v.ElementTypes != nil { + if len(v.ElementTypes) != len(tu.ElementTypes) { + return false + } + for pos, typ := range tu.ElementTypes { + if !typ.Is(v.ElementTypes[pos]) { + return false + } + } + } + return ok +} + +func (tu Tuple) String() string { + return "tftypes.Tuple" +} + +func (tu Tuple) private() {} + +// MarshalJSON returns a JSON representation of the full type signature of +// `tu`, including the ElementTypes. +// +// Deprecated: this is not meant to be called by third-party code. +func (tu Tuple) MarshalJSON() ([]byte, error) { + elements, err := json.Marshal(tu.ElementTypes) + if err != nil { + return nil, err + } + return []byte(`["tuple",` + string(elements) + `]`), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/type.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/type.go new file mode 100644 index 00000000..1e7ff193 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/type.go @@ -0,0 +1,163 @@ +package tftypes + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// Type is an interface representing a Terraform type. It is only meant to be +// implemented by the tftypes package. Types define the shape and +// characteristics of data coming from or being sent to Terraform. +type Type interface { + // Is is used to determine what type a Type implementation is. It is + // the recommended method for determining whether two types are + // equivalent or not. + Is(Type) bool + + // String returns a string representation of the Type's name. + String() string + + // MarshalJSON returns a JSON representation of the Type's signature. + // It is modeled based on Terraform's requirements for type signature + // JSON representations, and may change over time to match Terraform's + // formatting. + // + // Deprecated: this is not meant to be called by third-party code. + MarshalJSON() ([]byte, error) + + // private is meant to keep this interface from being implemented by + // types from other packages. + private() +} + +type jsonType struct { + t Type +} + +// ParseJSONType returns a Type from its JSON representation. The JSON +// representation should come from Terraform or from MarshalJSON as the format +// is not part of this package's API guarantees. +// +// Deprecated: this is not meant to be called by third-party code. 
+func ParseJSONType(buf []byte) (Type, error) { + var t jsonType + err := json.Unmarshal(buf, &t) + return t.t, err +} + +func (t *jsonType) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + t.t = Bool + case "number": + t.t = Number + case "string": + t.t = String + case "dynamic": + t.t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety jsonType + err = dec.Decode(&ety) + if err != nil { + return err + } + t.t = List{ + ElementType: ety.t, + } + case "map": + var ety jsonType + err = dec.Decode(&ety) + if err != nil { + return err + } + t.t = Map{ + AttributeType: ety.t, + } + case "set": + var ety jsonType + err = dec.Decode(&ety) + if err != nil { + return err + } + t.t = Set{ + ElementType: ety.t, + } + case "object": + var atys map[string]jsonType + err = dec.Decode(&atys) + if err != nil { + return err + } + types := make(map[string]Type, len(atys)) + for k, v := range atys { + types[k] = v.t + } + t.t = Object{ + AttributeTypes: types, + } + case "tuple": + var etys []jsonType + err = dec.Decode(&etys) + if err != nil { + return err + } + types := make([]Type, 0, len(etys)) + for _, ty := range etys { + types = append(types, ty.t) + } + t.t = Tuple{ + ElementTypes: types, + } + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/unknown_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/unknown_value.go new file mode 100644 index 00000000..016d3bbe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/unknown_value.go @@ -0,0 +1,9 @@ +package tftypes + +const ( + // UnknownValue represents a value that is not yet known. It can be the + // value of any type. + UnknownValue = unknown(0) +) + +type unknown byte diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go new file mode 100644 index 00000000..2b882d09 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go @@ -0,0 +1,499 @@ +package tftypes + +import ( + "bytes" + "errors" + "fmt" + "math/big" + + "github.com/google/go-cmp/cmp" + "github.com/vmihailenco/msgpack" +) + +// ValueConverter is an interface that provider-defined types can implement to +// control how Value.As will convert a Value into that type. The passed Value +// is the Value that Value.As is being called on. 
The intended usage is to call +// Value.As on the passed Value, converting it into a builtin type, and then +// converting or casting that builtin type to the provider-defined type. +type ValueConverter interface { + FromTerraform5Value(Value) error +} + +// ValueCreator is an interface that provider-defined types can implement to +// control how NewValue will convert that type into a Value. The returned +// interface should return one of the builtin Value representations that should +// be used for that Value. +type ValueCreator interface { + ToTerraform5Value() (interface{}, error) +} + +type msgPackUnknownType struct{} + +var msgPackUnknownVal = msgPackUnknownType{} + +func (u msgPackUnknownType) MarshalMsgpack() ([]byte, error) { + return []byte{0xd4, 0, 0}, nil +} + +// Value is a piece of data from Terraform or being returned to Terraform. It +// has a Type associated with it, defining its shape and characteristics, and a +// Go representation of that Type containing the data itself. Values are a +// special type and are not represented as pure Go values beause they can +// contain UnknownValues, which cannot be losslessly represented in Go's type +// system. +// +// The recommended usage of a Value is to check that it is known, using +// Value.IsKnown, then to convert it to a Go type, using Value.As. The Go type +// can then be manipulated. +type Value struct { + typ Type + value interface{} +} + +// NewValue returns a Value constructed using the specified Type and stores the +// passed value in it. +// +// The passed value should be in one of the builtin Value representations or +// implement the ValueCreator interface. +// +// The builtin Value representations are: +// +// * String: string, *string +// +// * Number: *big.Float, int64, *int64, int32, *int32, int16, *int16, int8, *int8, int, *int, +// uint64, *uint64, uint32, *uint32, uint16, *uint16, uint8, *uint8, byte, *byte, uint, *uint, +// float64, *float64, float32, *float32 +// +// * Bool: bool, *bool +// +// * Map and Object: map[string]Value +// +// * Tuple, List, and Set: []Value +func NewValue(t Type, val interface{}) Value { + if val == nil || val == UnknownValue { + return Value{ + typ: t, + value: val, + } + } + if creator, ok := val.(ValueCreator); ok { + var err error + val, err = creator.ToTerraform5Value() + if err != nil { + panic("error creating tftypes.Value: " + err.Error()) + } + } + + switch val := val.(type) { + case *string: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: *val, + } + case *bool: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: *val, + } + case *uint: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetUint64(uint64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint64: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetUint64(uint64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint8: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint16: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint32: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int: + if val == 
nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int8: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int16: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int32: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int64: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(*val) + return Value{ + typ: t, + value: f, + } + case *float32: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: big.NewFloat(float64(*val)), + } + case *float64: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: big.NewFloat(*val), + } + case string, *big.Float, bool, map[string]Value, []Value: + return Value{ + typ: t, + value: val, + } + } + panic(fmt.Sprintf("unknown type %T passed to NewValue", val)) +} + +// As converts a Value into a Go value. `dst` must be set to a pointer to a +// value of a supported type for the Value's type or an implementation of the +// ValueConverter interface. +// +// For Strings, `dst` must be a pointer to a string or a pointer to a pointer +// to a string. If it's a pointer to a pointer to a string, if the Value is +// null, the pointer to the string will be set to nil. If it's a pointer to a +// string, if the Value is null, the string will be set to the empty value. +// +// For Numbers, `dst` must be a poitner to a big.Float or a pointer to a +// pointer to a big.Float. If it's a pointer to a pointer to a big.Float, if +// the Value is null, the pointer to the big.Float will be set to nil. If it's +// a pointer to a big.Float, if the Value is null, the big.Float will be set to +// 0. +// +// For Bools, `dst` must be a pointer to a bool or a pointer to a pointer to a +// bool. If it's a pointer to a pointer to a bool, if the Value is null, the +// pointer to the bool will be set to nil. If it's a pointer to a bool, if the +// Value is null, the bool will be set to false. +// +// For Maps and Objects, `dst` must be a pointer to a map[string]Value or a +// pointer to a pointer to a map[string]Value. If it's a pointer to a pointer +// to a map[string]Value, if the Value is null, the pointer to the +// map[string]Value will be set to nil. If it's a pointer to a +// map[string]Value, if the Value is null, the map[string]Value will be set to +// an empty map. +// +// For Lists, Sets, and Tuples, `dst` must be a pointer to a []Value or a +// pointer to a pointer to a []Value. If it's a pointer to a pointer to a +// []Value, if the Value is null, the poitner to []Value will be set to nil. If +// it's a pointer to a []Value, if the Value is null, the []Value will be set +// to an empty slice. +// +// Future builtin conversions may be added over time. +// +// If `val` is unknown, an error will be returned, as unknown values can't be +// represented in Go's type system. Providers should check Value.IsKnown before +// calling Value.As. 
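//
// An illustrative sketch of the null handling described above, assuming val
// is a null Value of type Bool:
//
//     var b bool
//     _ = val.As(&b)  // b is set to false, the zero value
//
//     var bp *bool
//     _ = val.As(&bp) // bp is set to nil, preserving the null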
+func (val Value) As(dst interface{}) error { + unmarshaler, ok := dst.(ValueConverter) + if ok { + return unmarshaler.FromTerraform5Value(val) + } + if !val.IsKnown() { + return fmt.Errorf("unmarshaling unknown values is not supported") + } + switch target := dst.(type) { + case *string: + if val.IsNull() { + *target = "" + return nil + } + v, ok := val.value.(string) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected string", val.typ, dst) + } + *target = v + return nil + case **string: + if val.IsNull() { + *target = nil + return nil + } + return val.As(*target) + case *big.Float: + if val.IsNull() { + target.Set(big.NewFloat(0)) + return nil + } + v, ok := val.value.(*big.Float) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected *big.Float", val.typ, dst) + } + target.Set(v) + return nil + case **big.Float: + if val.IsNull() { + *target = nil + return nil + } + return val.As(*target) + case *bool: + if val.IsNull() { + *target = false + return nil + } + v, ok := val.value.(bool) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected boolean", val.typ, dst) + } + *target = v + return nil + case **bool: + if val.IsNull() { + *target = nil + return nil + } + return val.As(*target) + case *map[string]Value: + if val.IsNull() { + *target = map[string]Value{} + return nil + } + v, ok := val.value.(map[string]Value) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T, expected map[string]tftypes.Value", val.typ, dst) + } + *target = v + return nil + case **map[string]Value: + if val.IsNull() { + *target = nil + return nil + } + return val.As(*target) + case *[]Value: + if val.IsNull() { + *target = []Value{} + return nil + } + v, ok := val.value.([]Value) + if !ok { + return fmt.Errorf("can't unmarshal %s into %T expected []tftypes.Value", val.typ, dst) + } + *target = v + return nil + case **[]Value: + if val.IsNull() { + *target = nil + return nil + } + return val.As(*target) + } + return fmt.Errorf("can't unmarshal into %T, needs FromTerraform5Value method", dst) +} + +// Is calls Type.Is using the Type of the Value. +func (val Value) Is(t Type) bool { + if val.typ == nil || t == nil { + return val.typ == nil && t == nil + } + return val.typ.Is(t) +} + +// IsKnown returns true if `val` is known. If `val` is an aggregate type, only +// the top level of the aggregate type is checked; elements and attributes are +// not checked. +func (val Value) IsKnown() bool { + return val.value != UnknownValue +} + +// IsFullyKnown returns true if `val` is known. If `val` is an aggregate type, +// IsFullyKnown only returns true if all elements and attributes are known, as +// well. +func (val Value) IsFullyKnown() bool { + if !val.IsKnown() { + return false + } + switch val.typ.(type) { + case primitive: + return true + case List, Set, Tuple: + for _, v := range val.value.([]Value) { + if !v.IsFullyKnown() { + return false + } + } + return true + case Map, Object: + for _, v := range val.value.(map[string]Value) { + if !v.IsFullyKnown() { + return false + } + } + return true + } + panic(fmt.Sprintf("unknown type %T", val.typ)) +} + +// IsNull returns true if the Value is null. +func (val Value) IsNull() bool { + return val.value == nil +} + +// MarshalMsgPack returns a msgpack representation of the Value. This is used +// for constructing tfprotov5.DynamicValues. +// +// Deprecated: this is not meant to be called by third parties. Don't use it. 
+func (val Value) MarshalMsgPack(t Type) ([]byte, error) { + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshalMsgPack(val, t, AttributePath{}, enc) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func unexpectedValueTypeError(p AttributePath, expected, got interface{}, typ Type) error { + return p.NewErrorf("unexpected value type %T, %s values must be of type %T", got, typ, expected) +} + +// ValueComparer returns a github.com/google/go-cmp/cmp#Option that can be used +// to tell go-cmp how to compare Values. +func ValueComparer() cmp.Option { + return cmp.Comparer(valueComparer) +} + +func numberComparer(i, j *big.Float) bool { + return (i == nil && j == nil) || (i != nil && j != nil && i.Cmp(j) == 0) +} + +func valueComparer(i, j Value) bool { + if !i.typ.Is(j.typ) { + return false + } + return cmp.Equal(i.value, j.value, cmp.Comparer(numberComparer), ValueComparer()) +} + +// TypeFromElements returns the common type that the passed elements all have +// in common. An error will be returned if the passed elements are not of the +// same type. +func TypeFromElements(elements []Value) (Type, error) { + var typ Type + for _, el := range elements { + if typ == nil { + typ = el.typ + continue + } + if !typ.Is(el.typ) { + return nil, errors.New("elements do not all have the same types") + } + } + if typ == nil { + return DynamicPseudoType, nil + } + return typ, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value_msgpack.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value_msgpack.go new file mode 100644 index 00000000..09b3b117 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value_msgpack.go @@ -0,0 +1,253 @@ +package tftypes + +import ( + "fmt" + "math" + "math/big" + "sort" + + "github.com/vmihailenco/msgpack" +) + +func marshalMsgPack(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + if typ.Is(DynamicPseudoType) && !val.Is(DynamicPseudoType) { + return marshalMsgPackDynamicPseudoType(val, typ, p, enc) + + } + if !val.IsKnown() { + err := enc.Encode(msgPackUnknownVal) + if err != nil { + return p.NewErrorf("error encoding UnknownValue: %w", err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return p.NewErrorf("error encoding null value: %w", err) + } + return nil + } + switch { + case typ.Is(String): + return marshalMsgPackString(val, typ, p, enc) + case typ.Is(Number): + return marshalMsgPackNumber(val, typ, p, enc) + case typ.Is(Bool): + return marshalMsgPackBool(val, typ, p, enc) + case typ.Is(List{}): + return marshalMsgPackList(val, typ, p, enc) + case typ.Is(Set{}): + return marshalMsgPackSet(val, typ, p, enc) + case typ.Is(Map{}): + return marshalMsgPackMap(val, typ, p, enc) + case typ.Is(Tuple{}): + return marshalMsgPackTuple(val, typ, p, enc) + case typ.Is(Object{}): + return marshalMsgPackObject(val, typ, p, enc) + } + return fmt.Errorf("unknown type %s", typ) +} + +func marshalMsgPackDynamicPseudoType(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + typeJSON, err := val.typ.MarshalJSON() + if err != nil { + return p.NewErrorf("error generating JSON for type %s: %w", val.typ, err) + } + err = enc.EncodeArrayLen(2) + if err != nil { + return p.NewErrorf("error encoding array length: %w", err) + } + err = enc.EncodeBytes(typeJSON) + if err != nil { + return p.NewErrorf("error encoding JSON type info: %w", err) + } + err = marshalMsgPack(val, val.typ, p, 
enc) + if err != nil { + return p.NewErrorf("error marshaling DynamicPseudoType value: %w", err) + } + return nil +} + +func marshalMsgPackString(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + s, ok := val.value.(string) + if !ok { + return unexpectedValueTypeError(p, s, val.value, typ) + } + err := enc.EncodeString(s) + if err != nil { + return p.NewErrorf("error encoding string value: %w", err) + } + return nil +} + +func marshalMsgPackNumber(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + n, ok := val.value.(*big.Float) + if !ok { + return unexpectedValueTypeError(p, n, val.value, typ) + } + if n.IsInf() { + if n.Sign() == -1 { + err := enc.EncodeFloat64(math.Inf(-1)) + if err != nil { + return p.NewErrorf("error encoding negative infinity: %w", err) + } + } else if n.Sign() == 1 { + err := enc.EncodeFloat64(math.Inf(1)) + if err != nil { + return p.NewErrorf("error encoding positive infinity: %w", err) + } + } else { + return p.NewErrorf("error encoding unknown infiniy: sign %d is unknown", n.Sign()) + } + } else if iv, acc := n.Int64(); acc == big.Exact { + err := enc.EncodeInt(iv) + if err != nil { + return p.NewErrorf("error encoding int value: %w", err) + } + } else if fv, acc := n.Float64(); acc == big.Exact { + err := enc.EncodeFloat64(fv) + if err != nil { + return p.NewErrorf("error encoding float value: %w", err) + } + } else { + err := enc.EncodeString(n.Text('f', -1)) + if err != nil { + return p.NewErrorf("error encoding number string value: %w", err) + } + } + return nil +} + +func marshalMsgPackBool(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + b, ok := val.value.(bool) + if !ok { + return unexpectedValueTypeError(p, b, val.value, typ) + } + err := enc.EncodeBool(b) + if err != nil { + return p.NewErrorf("error encoding bool value: %w", err) + } + return nil +} + +func marshalMsgPackList(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + l, ok := val.value.([]Value) + if !ok { + return unexpectedValueTypeError(p, l, val.value, typ) + } + err := enc.EncodeArrayLen(len(l)) + if err != nil { + return p.NewErrorf("error encoding list length: %w", err) + } + for pos, i := range l { + p.WithElementKeyInt(int64(pos)) + err := marshalMsgPack(i, typ.(List).ElementType, p, enc) + if err != nil { + return err + } + p.WithoutLastStep() + } + return nil +} + +func marshalMsgPackSet(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + s, ok := val.value.([]Value) + if !ok { + return unexpectedValueTypeError(p, s, val.value, typ) + } + err := enc.EncodeArrayLen(len(s)) + if err != nil { + return p.NewErrorf("error encoding set length: %w", err) + } + for _, i := range s { + p.WithElementKeyValue(i) + err := marshalMsgPack(i, typ.(Set).ElementType, p, enc) + if err != nil { + return err + } + p.WithoutLastStep() + } + return nil +} + +func marshalMsgPackMap(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + m, ok := val.value.(map[string]Value) + if !ok { + return unexpectedValueTypeError(p, m, val.value, typ) + } + err := enc.EncodeMapLen(len(m)) + if err != nil { + return p.NewErrorf("error encoding map length: %w", err) + } + for k, v := range m { + p.WithElementKeyString(k) + err := marshalMsgPack(NewValue(String, k), String, p, enc) + if err != nil { + return p.NewErrorf("error encoding map key: %w", err) + } + err = marshalMsgPack(v, typ.(Map).AttributeType, p, enc) + if err != nil { + return err + } + p.WithoutLastStep() + } + return nil +} + +func 
marshalMsgPackTuple(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + t, ok := val.value.([]Value) + if !ok { + return unexpectedValueTypeError(p, t, val.value, typ) + } + types := typ.(Tuple).ElementTypes + err := enc.EncodeArrayLen(len(types)) + if err != nil { + return p.NewErrorf("error encoding tuple length: %w", err) + } + for pos, v := range t { + p.WithElementKeyInt(int64(pos)) + ty := types[pos] + err := marshalMsgPack(v, ty, p, enc) + if err != nil { + return err + } + p.WithoutLastStep() + } + return nil +} + +func marshalMsgPackObject(val Value, typ Type, p AttributePath, enc *msgpack.Encoder) error { + o, ok := val.value.(map[string]Value) + if !ok { + return unexpectedValueTypeError(p, o, val.value, typ) + } + types := typ.(Object).AttributeTypes + keys := make([]string, 0, len(types)) + for k := range types { + keys = append(keys, k) + } + sort.Strings(keys) + err := enc.EncodeMapLen(len(keys)) + if err != nil { + return p.NewErrorf("error encoding object length: %w", err) + } + for _, k := range keys { + p.WithAttributeName(k) + ty := types[k] + v, ok := o[k] + if !ok { + return p.NewErrorf("no value set") + } + err := marshalMsgPack(NewValue(String, k), String, p, enc) + if err != nil { + return err + } + err = marshalMsgPack(v, ty, p, enc) + if err != nil { + return err + } + p.WithoutLastStep() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go new file mode 100644 index 00000000..3b1ced34 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go @@ -0,0 +1,104 @@ +package diag + +import ( + "errors" + "fmt" + + "github.com/hashicorp/go-cty/cty" +) + +// Diagnostics is a collection of Diagnostic. +// +// Developers should append and build the list of diagnostics up until a fatal +// error is reached, at which point they should return the Diagnostics to the +// SDK. +type Diagnostics []Diagnostic + +// HasError returns true is Diagnostics contains an instance of +// Severity == Error. +// +// This helper aims to mimic the go error practices of if err != nil. After any +// operation that returns Diagnostics, check that it HasError and bubble up the +// stack. +func (diags Diagnostics) HasError() bool { + for i := range diags { + if diags[i].Severity == Error { + return true + } + } + return false +} + +// Diagnostic is a contextual message intended at outlining problems in user +// configuration. +// +// It supports multiple levels of severity (Error or Warning), a short Summary +// of the problem, an optional longer Detail message that can assist the user in +// fixing the problem, as well as an AttributePath representation which +// Terraform uses to indicate where the issue took place in the user's +// configuration. +// +// A Diagnostic will typically be used to pinpoint a problem with user +// configuration, however it can still be used to present warnings or errors +// to the user without any AttributePath set. +type Diagnostic struct { + // Severity indicates the level of the Diagnostic. Currently can be set to + // either Error or Warning + Severity Severity + + // Summary is a short description of the problem, rendered above location + // information + Summary string + + // Detail is an optional second message rendered below location information + // typically used to communicate a potential fix to the user. + Detail string + + // AttributePath is a representation of the path starting from the root of + // block (resource, datasource, provider) under evaluation by the SDK, to + // the attribute that the Diagnostic should be associated to. Terraform will + // use this information to render information on where the problem took + // place in the user's configuration. + // + // It is represented with cty.Path, which is a list of steps of either + // cty.GetAttrStep (an actual attribute) or cty.IndexStep (a step with Key + // of cty.StringVal for map indexes, and cty.NumberVal for list indexes). + // + // PLEASE NOTE: While cty can support indexing into sets, the SDK and + // protocol currently do not. For any Diagnostic related to a schema.TypeSet + // or a child of that type, please terminate the path at the schema.TypeSet + // and opt for more verbose Summary and Detail to help guide the user. + // + // Validity of the AttributePath is currently the responsibility of the + // developer, Terraform should render the root block (provider, resource, + // datasource) in cases where the attribute path is invalid. 
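	//
	// A minimal sketch of such a path, assuming the problem lies in the
	// first element of a list attribute named "tags":
	//
	//     cty.Path{
	//         cty.GetAttrStep{Name: "tags"},
	//         cty.IndexStep{Key: cty.NumberIntVal(0)},
	//     }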
+ AttributePath cty.Path +} + +// Validate ensures a valid Severity and a non-empty Summary are set. +func (d Diagnostic) Validate() error { + var validSev bool + for _, sev := range severities { + if d.Severity == sev { + validSev = true + break + } + } + if !validSev { + return fmt.Errorf("invalid severity: %v", d.Severity) + } + if d.Summary == "" { + return errors.New("empty summary") + } + return nil +} + +// Severity is an enum type marking the severity level of a Diagnostic +type Severity int + +const ( + Error Severity = iota + Warning +) + +var severities = []Severity{Error, Warning} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go new file mode 100644 index 00000000..d43eab32 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go @@ -0,0 +1,39 @@ +package diag + +import "fmt" + +// FromErr will convert an error into a Diagnostics. This returns Diagnostics +// as the most common use case in Go will be handling a single error +// returned from a function. +// +// if err != nil { +// return diag.FromErr(err) +// } +func FromErr(err error) Diagnostics { + if err == nil { + return nil + } + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: err.Error(), + }, + } +} + +// Errorf creates a Diagnostics with a single Error level Diagnostic entry. +// The summary is populated by performing a fmt.Sprintf with the supplied +// values. This returns a single error in a Diagnostics as errors typically +// do not occur in multiples as warnings may. +// +// if unexpectedCondition { +// return diag.Errorf("unexpected: %s", someValue) +// } +func Errorf(format string, a ...interface{}) Diagnostics { + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: fmt.Sprintf(format, a...), + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go new file mode 100644 index 00000000..546c5e1c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go @@ -0,0 +1,116 @@ +package logging + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "syscall" + + "github.com/hashicorp/logutils" + testing "github.com/mitchellh/go-testing-interface" +) + +// These are the environmental variables that determine if we log, and if +// we log whether or not the log should go to a file. +const ( + EnvLog = "TF_LOG" // Set to True + EnvLogFile = "TF_LOG_PATH" // Set to a file + // EnvLogPathMask splits test log files by name. + EnvLogPathMask = "TF_LOG_PATH_MASK" +) + +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} + +// LogOutput determines where we should send logs (if anywhere) and the log level. 
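//
// An illustrative sketch of wiring the returned writer into the standard
// logger (SetOutput below does the same, plus a nil check), assuming t is
// the current test:
//
//     out, err := LogOutput(t)
//     if err != nil {
//         log.Fatal(err)
//     }
//     log.SetOutput(out)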
+func LogOutput(t testing.T) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + if logPath := os.Getenv(EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// SetOutput checks for a log destination with LogOutput, and calls +// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses +// ioutil.Discard. Any error from LogOutout is fatal. +func SetOutput(t testing.T) { + out, err := LogOutput(t) + if err != nil { + log.Fatal(err) + } + + if out == nil { + out = ioutil.Discard + } + + log.SetOutput(out) +} + +// LogLevel returns the current log level string based the environment vars +func LogLevel() string { + envLevel := os.Getenv(EnvLog) + if envLevel == "" { + return "" + } + + logLevel := "TRACE" + if isValidLogLevel(envLevel) { + // allow following for better ux: info, Info or INFO + logLevel = strings.ToUpper(envLevel) + } else { + log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", + envLevel, ValidLevels) + } + + return logLevel +} + +// IsDebugOrHigher returns whether or not the current log level is debug or trace +func IsDebugOrHigher() bool { + level := string(LogLevel()) + return level == "DEBUG" || level == "TRACE" +} + +func isValidLogLevel(level string) bool { + for _, l := range ValidLevels { + if strings.ToUpper(level) == string(l) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/logging/transport.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go new file mode 100644 index 00000000..b406cbae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go @@ -0,0 +1,91 @@ +package resource + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +func (e *NotFoundError) Unwrap() error { + return e.LastError +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState 
[]string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +func (e *UnexpectedStateError) Unwrap() error { + return e.LastError +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} + +func (e *TimeoutError) Unwrap() error { + return e.LastError +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/resource/id.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go new file mode 100644 index 00000000..345abf71 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go @@ -0,0 +1,12 @@ +package resource + +import ( + "bytes" + "encoding/json" +) + +func unmarshalJSON(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go new file mode 100644 index 00000000..bef3a442 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go @@ -0,0 +1,234 @@ +package resource + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-exec/tfexec" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + testing "github.com/mitchellh/go-testing-interface" +) + +func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, factories map[string]func() (*schema.Provider, error), v5factories map[string]func() (tfprotov5.ProviderServer, error)) error { + // don't point to this as a test failure location + // point to whatever called it + t.Helper() + + // Run the providers in the same process as the test runner using the + // reattach behavior in Terraform. This ensures we get test coverage + // and enables the use of delve as a debugger. 
+ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // this is needed so Terraform doesn't default to expecting protocol 4; + // we're skipping the handshake because Terraform didn't launch the + // plugins. + os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5") + + // Terraform doesn't need to reach out to Checkpoint during testing. + wd.Setenv("CHECKPOINT_DISABLE", "1") + + // Terraform 0.12.X and 0.13.X+ treat namespaceless providers + // differently in terms of what namespace they default to. So we're + // going to set both variations, as we don't know which version of + // Terraform we're talking to. We're also going to allow overriding + // the host or namespace using environment variables. + var namespaces []string + host := "registry.terraform.io" + if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { + namespaces = append(namespaces, v) + } else { + namespaces = append(namespaces, "-", "hashicorp") + } + if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { + host = v + } + + // Spin up gRPC servers for every provider factory, start a + // WaitGroup to listen for all of the close channels. + var wg sync.WaitGroup + reattachInfo := map[string]tfexec.ReattachConfig{} + for providerName, factory := range factories { + // providerName may be returned as terraform-provider-foo, and + // we need just foo. So let's fix that. + providerName = strings.TrimPrefix(providerName, "terraform-provider-") + + provider, err := factory() + if err != nil { + return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) + } + + // keep track of the running factory, so we can make sure it's + // shut down. + wg.Add(1) + + // configure the settings our plugin will be served with + // the GRPCProviderFunc wraps a non-gRPC provider server + // into a gRPC interface, and the logger just discards logs + // from go-plugin. + opts := &plugin.ServeOpts{ + GRPCProviderFunc: func() tfprotov5.ProviderServer { + return schema.NewGRPCProviderServer(provider) + }, + Logger: hclog.New(&hclog.LoggerOptions{ + Name: "plugintest", + Level: hclog.Trace, + Output: ioutil.Discard, + }), + NoLogOutputOverride: true, + } + + // let's actually start the provider server + config, closeCh, err := plugin.DebugServe(ctx, opts) + if err != nil { + return fmt.Errorf("unable to serve provider %q: %w", providerName, err) + } + tfexecConfig := tfexec.ReattachConfig{ + Protocol: config.Protocol, + Pid: config.Pid, + Test: config.Test, + Addr: tfexec.ReattachConfigAddr{ + Network: config.Addr.Network, + String: config.Addr.String, + }, + } + + // when the provider exits, remove one from the waitgroup + // so we can track when everything is done + go func(c <-chan struct{}) { + <-c + wg.Done() + }(closeCh) + + // set our provider's reattachinfo in our map, once + // for every namespace that different Terraform versions + // may expect. + for _, ns := range namespaces { + reattachInfo[strings.TrimSuffix(host, "/")+"/"+ + strings.TrimSuffix(ns, "/")+"/"+ + providerName] = tfexecConfig + } + } + + // Now spin up gRPC servers for every plugin-go provider factory + // in the same way. + for providerName, factory := range v5factories { + // providerName may be returned as terraform-provider-foo, and + // we need just foo. So let's fix that. + providerName = strings.TrimPrefix(providerName, "terraform-provider-") + + // If the user has supplied the same provider in both + // ProviderFactories and ProtoV5ProviderFactories, they made a + // mistake and we should exit early. 
+ for _, ns := range namespaces { + reattachString := strings.TrimSuffix(host, "/") + "/" + + strings.TrimSuffix(ns, "/") + "/" + + providerName + if _, ok := reattachInfo[reattachString]; ok { + return fmt.Errorf("Provider %s registered in both TestCase.ProviderFactories and TestCase.ProtoV5ProviderFactories: please use one or the other, or supply a muxed provider to TestCase.ProtoV5ProviderFactories.", providerName) + } + } + + provider, err := factory() + if err != nil { + return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) + } + + // keep track of the running factory, so we can make sure it's + // shut down. + wg.Add(1) + + // configure the settings our plugin will be served with + // the GRPCProviderFunc wraps a non-gRPC provider server + // into a gRPC interface, and the logger just discards logs + // from go-plugin. + opts := &plugin.ServeOpts{ + GRPCProviderFunc: func() tfprotov5.ProviderServer { + return provider + }, + Logger: hclog.New(&hclog.LoggerOptions{ + Name: "plugintest", + Level: hclog.Trace, + Output: ioutil.Discard, + }), + NoLogOutputOverride: true, + } + + // let's actually start the provider server + config, closeCh, err := plugin.DebugServe(ctx, opts) + if err != nil { + return fmt.Errorf("unable to serve provider %q: %w", providerName, err) + } + tfexecConfig := tfexec.ReattachConfig{ + Protocol: config.Protocol, + Pid: config.Pid, + Test: config.Test, + Addr: tfexec.ReattachConfigAddr{ + Network: config.Addr.Network, + String: config.Addr.String, + }, + } + + // when the provider exits, remove one from the waitgroup + // so we can track when everything is done + go func(c <-chan struct{}) { + <-c + wg.Done() + }(closeCh) + + // set our provider's reattachinfo in our map, once + // for every namespace that different Terraform versions + // may expect. + for _, ns := range namespaces { + reattachString := strings.TrimSuffix(host, "/") + "/" + + strings.TrimSuffix(ns, "/") + "/" + + providerName + reattachInfo[reattachString] = tfexecConfig + } + } + + // set the working directory reattach info that will tell Terraform how to + // connect to our various running servers. + wd.SetReattachInfo(reattachInfo) + + // ok, let's call whatever Terraform command the test was trying to + // call, now that we know it'll attach back to those servers we just + // started. + err := f() + if err != nil { + log.Printf("[WARN] Got error running Terraform: %s", err) + } + + // cancel the servers so they'll return. Otherwise, this closeCh won't + // get closed, and we'll hang here. + cancel() + + // wait for the servers to actually shut down; it may take a moment for + // them to clean up, or whatever. + // TODO: add a timeout here? + // PC: do we need one? The test will time out automatically... + wg.Wait() + + // once we've run the Terraform command, let's remove the reattach + // information from the WorkingDir's environment. The WorkingDir will + // persist until the next call, but the server in the reattach info + // doesn't exist anymore at this point, so the reattach info is no + // longer valid. In theory it should be overwritten in the next call, + // but just to avoid any confusing bug reports, let's just unset the + // environment variable altogether. 
+ wd.UnsetReattachInfo() + + // return any error returned from the orchestration code running + // Terraform commands + return err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go new file mode 100644 index 00000000..6eda1993 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go @@ -0,0 +1,280 @@ +package resource + +import ( + "context" + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. A nil result represents not found. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. +type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found (nil result from Refresh) + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForStateContext watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +// +// Cancellation from the passed in context will cancel the refresh loop +func (conf *StateChangeConf) WaitForStateContext(ctx context.Context) (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. 
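// Editor's note: illustrative sketch, not part of the vendored diff. It shows the
// caller-side use of the StateChangeConf contract documented above, from a
// provider's perspective (imports of "context", "time" and this helper/resource
// package are assumed). fetchRoleStatus and the CREATING/ACTIVE state names are
// hypothetical; the WaitForStateContext implementation continues below.
func waitForRoleActiveExample(ctx context.Context) (interface{}, error) {
	conf := &resource.StateChangeConf{
		Pending:    []string{"CREATING"},
		Target:     []string{"ACTIVE"},
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
		Refresh: func() (interface{}, string, error) {
			role, status, err := fetchRoleStatus() // hypothetical API lookup
			if err != nil {
				return nil, "", err
			}
			if role == nil {
				// a nil result tells the waiter the object was not found yet
				return nil, "", nil
			}
			return role, status, nil
		},
	}
	return conf.WaitForStateContext(ctx)
}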
+ resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + select { + case <-time.After(conf.Delay): + case <-cancelCh: + return + } + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. + notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + case <-ctx.Done(): + close(cancelCh) + return nil, ctx.Err() + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. 
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-ctx.Done(): + log.Println("[ERROR] Context cancelation detected, abandoning grace period") + break forSelect + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// Deprecated: Please use WaitForStateContext to ensure proper plugin shutdown +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + return conf.WaitForStateContext(context.Background()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go new file mode 100644 index 00000000..7f8c2252 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go @@ -0,0 +1,294 @@ +package resource + +import ( + "encoding/json" + "fmt" + "strconv" + + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +type shimmedState struct { + state *terraform.State +} + +func shimStateFromJson(jsonState *tfjson.State) (*terraform.State, error) { + state := terraform.NewState() + state.TFVersion = jsonState.TerraformVersion + + if jsonState.Values == nil { + // the state is empty + return state, nil + } + + for key, output := range jsonState.Values.Outputs { + os, err := shimOutputState(output) + if err != nil { + return nil, err + } + state.RootModule().Outputs[key] = os + } + + ss := &shimmedState{state} + err := ss.shimStateModule(jsonState.Values.RootModule) + if err != nil { + return nil, err + } + + return state, nil +} + +func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { + os := &terraform.OutputState{ + Sensitive: so.Sensitive, + } + + switch v := so.Value.(type) { + case string: + os.Type = "string" + os.Value = v + return os, nil + case []interface{}: + os.Type = "list" + if len(v) == 0 { + os.Value = v + return os, nil + } + switch firstElem := v[0].(type) { + case string: + elements := make([]interface{}, len(v)) + for i, el := range v { + elements[i] = el.(string) + } + os.Value = elements + case bool: + elements := make([]interface{}, len(v)) + for i, el := range v { + elements[i] = el.(bool) + } + os.Value = elements + // unmarshalled number from JSON will always be json.Number + case json.Number: + elements := make([]interface{}, len(v)) + for i, el := range v { + elements[i] = el.(json.Number) + } + os.Value = elements + case []interface{}: + os.Value = v + case map[string]interface{}: + os.Value = v + default: + return nil, fmt.Errorf("unexpected output list element type: %T", firstElem) + } + return os, nil + case map[string]interface{}: + os.Type = "map" + 
os.Value = v + return os, nil + case bool: + os.Type = "string" + os.Value = strconv.FormatBool(v) + return os, nil + // unmarshalled number from JSON will always be json.Number + case json.Number: + os.Type = "string" + os.Value = v.String() + return os, nil + } + + return nil, fmt.Errorf("unexpected output type: %T", so.Value) +} + +func (ss *shimmedState) shimStateModule(sm *tfjson.StateModule) error { + var path addrs.ModuleInstance + + if sm.Address == "" { + path = addrs.RootModuleInstance + } else { + var diags tfdiags.Diagnostics + path, diags = addrs.ParseModuleInstanceStr(sm.Address) + if diags.HasErrors() { + return diags.Err() + } + } + + mod := ss.state.AddModule(path) + for _, res := range sm.Resources { + resourceState, err := shimResourceState(res) + if err != nil { + return err + } + + key, err := shimResourceStateKey(res) + if err != nil { + return err + } + + mod.Resources[key] = resourceState + } + + if len(sm.ChildModules) > 0 { + return fmt.Errorf("Modules are not supported. Found %d modules.", + len(sm.ChildModules)) + } + return nil +} + +func shimResourceStateKey(res *tfjson.StateResource) (string, error) { + if res.Index == nil { + return res.Address, nil + } + + var mode terraform.ResourceMode + switch res.Mode { + case tfjson.DataResourceMode: + mode = terraform.DataResourceMode + case tfjson.ManagedResourceMode: + mode = terraform.ManagedResourceMode + default: + return "", fmt.Errorf("unexpected resource mode for %q", res.Address) + } + + var index int + switch idx := res.Index.(type) { + case json.Number: + i, err := idx.Int64() + if err != nil { + return "", fmt.Errorf("unexpected index value (%q) for %q, ", + idx, res.Address) + } + index = int(i) + default: + return "", fmt.Errorf("unexpected index type (%T) for %q, "+ + "for_each is not supported", res.Index, res.Address) + } + + rsk := &terraform.ResourceStateKey{ + Mode: mode, + Type: res.Type, + Name: res.Name, + Index: index, + } + + return rsk.String(), nil +} + +func shimResourceState(res *tfjson.StateResource) (*terraform.ResourceState, error) { + sf := &shimmedFlatmap{} + err := sf.FromMap(res.AttributeValues) + if err != nil { + return nil, err + } + attributes := sf.Flatmap() + + if _, ok := attributes["id"]; !ok { + return nil, fmt.Errorf("no %q found in attributes", "id") + } + + return &terraform.ResourceState{ + Provider: res.ProviderName, + Type: res.Type, + Primary: &terraform.InstanceState{ + ID: attributes["id"], + Attributes: attributes, + Meta: map[string]interface{}{ + "schema_version": int(res.SchemaVersion), + }, + Tainted: res.Tainted, + }, + Dependencies: res.DependsOn, + }, nil +} + +type shimmedFlatmap struct { + m map[string]string +} + +func (sf *shimmedFlatmap) FromMap(attributes map[string]interface{}) error { + if sf.m == nil { + sf.m = make(map[string]string, len(attributes)) + } + + return sf.AddMap("", attributes) +} + +func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error { + for key, value := range m { + k := key + if prefix != "" { + k = fmt.Sprintf("%s.%s", prefix, key) + } + + err := sf.AddEntry(k, value) + if err != nil { + return err + } + } + + mapLength := "%" + if prefix != "" { + mapLength = fmt.Sprintf("%s.%s", prefix, "%") + } + + sf.AddEntry(mapLength, strconv.Itoa(len(m))) + + return nil +} + +func (sf *shimmedFlatmap) AddSlice(name string, elements []interface{}) error { + for i, elem := range elements { + key := fmt.Sprintf("%s.%d", name, i) + err := sf.AddEntry(key, elem) + if err != nil { + return err + } + } + + sliceLength 
:= fmt.Sprintf("%s.#", name) + sf.AddEntry(sliceLength, strconv.Itoa(len(elements))) + + return nil +} + +func (sf *shimmedFlatmap) AddEntry(key string, value interface{}) error { + switch el := value.(type) { + case nil: + // omit the entry + return nil + case bool: + sf.m[key] = strconv.FormatBool(el) + case json.Number: + sf.m[key] = el.String() + case string: + sf.m[key] = el + case map[string]interface{}: + err := sf.AddMap(key, el) + if err != nil { + return err + } + case []interface{}: + err := sf.AddSlice(key, el) + if err != nil { + return err + } + default: + // This should never happen unless terraform-json + // changes how attributes (types) are represented. + // + // We handle all types which the JSON unmarshaler + // can possibly produce + // https://golang.org/pkg/encoding/json/#Unmarshal + + return fmt.Errorf("%q: unexpected type (%T)", key, el) + } + return nil +} + +func (sf *shimmedFlatmap) Flatmap() map[string]string { + return sf.m +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go new file mode 100644 index 00000000..fc9644b3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -0,0 +1,1053 @@ +package resource + +import ( + "errors" + "flag" + "fmt" + "log" + "os" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/go-multierror" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// flagSweep is a flag available when running tests on the command line. It +// contains a comma seperated list of regions to for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At time of writing, sweepers are ran +// sequentially, however they can list dependencies to be ran first. We track +// the sweepers that have been ran, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive. You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") +var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// type SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be ran in. 
This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for sweeper. Must be unique to be ran by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be ran + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that when invoked sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`. Sweeper names must be unique to help ensure a given sweeper +// is only ran once per run. +func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +func TestMain(m interface { + Run() int +}) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + + if _, err := runSweepers(regions, sweepers, *flagSweepAllowFailures); err != nil { + os.Exit(1) + } + } else { + exitCode := m.Run() + os.Exit(exitCode) + } +} + +func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures bool) (map[string]map[string]error, error) { + var sweeperErrorFound bool + sweeperRunList := make(map[string]map[string]error) + + for _, region := range regions { + region = strings.TrimSpace(region) + + var regionSweeperErrorFound bool + regionSweeperRunList := make(map[string]error) + + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper, sweepers, regionSweeperRunList, allowFailures); err != nil { + if allowFailures { + continue + } + + sweeperRunList[region] = regionSweeperRunList + return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) + } + } + + log.Printf("Sweeper Tests ran successfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr == nil { + fmt.Printf("\t- %s\n", sweeper) + } else { + regionSweeperErrorFound = true + } + } + + if regionSweeperErrorFound { + sweeperErrorFound = true + log.Printf("Sweeper Tests ran unsuccessfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr != nil { + fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) + } + } + } + + sweeperRunList[region] = regionSweeperRunList + } + + if sweeperErrorFound { + return sweeperRunList, errors.New("at least one sweeper failed") + } + + return sweeperRunList, nil +} + +// filterSweepers takes a comma seperated string listing the names of sweepers +// to be ran, and returns a filtered set from the list of all of sweepers to +// run based on the names given. 
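// Editor's note: illustrative sketch, not part of the vendored diff. A provider
// typically registers sweepers from an init() in its test package and wires
// TestMain so that `go test -sweep=us-east-1` (optionally with -sweep-run) can
// clean up leaked acceptance-test resources. The sweeper name "alks_iamrole" and
// the sweepTestRoles helper are hypothetical; "testing" is the standard library.
func init() {
	resource.AddTestSweepers("alks_iamrole", &resource.Sweeper{
		Name: "alks_iamrole",
		F: func(region string) error {
			return sweepTestRoles(region) // hypothetical cleanup helper
		},
	})
}

func TestMain(m *testing.M) {
	// *testing.M satisfies the interface{ Run() int } parameter used above.
	resource.TestMain(m)
}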
+func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + for foundName, foundSweeper := range filterSweeperWithDependencies(name, source) { + sweepers[foundName] = foundSweeper + } + } + } + } + return sweepers +} + +// filterSweeperWithDependencies recursively returns sweeper and all dependencies. +// Since filterSweepers performs fuzzy matching, this function is used +// to perform exact sweeper and dependency lookup. +func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[string]*Sweeper { + result := make(map[string]*Sweeper) + + currentSweeper, ok := source[name] + if !ok { + log.Printf("[WARN] Sweeper has dependency (%s), but that sweeper was not found", name) + return result + } + + result[name] = currentSweeper + + for _, dependency := range currentSweeper.Dependencies { + for foundName, foundSweeper := range filterSweeperWithDependencies(dependency, source) { + result[foundName] = foundSweeper + } + } + + return result +} + +// runSweeperWithRegion recieves a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper fun with the region, and +// add the success/fail status to the sweeperRunList. +func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { + for _, dep := range s.Dependencies { + depSweeper, ok := sweepers[dep] + + if !ok { + return fmt.Errorf("sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) + err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + + if err != nil { + if allowFailures { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) + continue + } + + return err + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + log.Printf("[DEBUG] Running Sweeper (%s) in region (%s)", s.Name, region) + + runE := s.F(region) + + sweeperRunList[s.Name] = runE + + if runE != nil { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", s.Name, region, runE) + } + + return runE +} + +const TestEnvVar = "TF_ACC" + +// TestCheckFunc is the callback type used with acceptance tests to check +// the state of a resource. The state passed in is the latest state known, +// or in the case of being after a destroy, it is the last known state when +// it was created. +type TestCheckFunc func(*terraform.State) error + +// ImportStateCheckFunc is the check function for ImportState tests +type ImportStateCheckFunc func([]*terraform.InstanceState) error + +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + +// ErrorCheckFunc is a function providers can use to handle errors. 
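// Editor's note: illustrative sketch, not part of the vendored diff. An
// ErrorCheckFunc (declared just below, and surfaced as TestCase.ErrorCheck) lets
// a provider inspect test errors before they fail the run, for example to
// annotate known transient failures. The "throttl" substring is hypothetical;
// imports of "strings" and "fmt" are assumed.
var exampleErrorCheck resource.ErrorCheckFunc = func(err error) error {
	if err == nil {
		return nil
	}
	if strings.Contains(err.Error(), "throttl") {
		// wrap, rather than swallow, so the original failure stays visible
		return fmt.Errorf("transient API throttling, consider re-running: %w", err)
	}
	return err
}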
+type ErrorCheckFunc func(error) error + +// TestCase is a single acceptance test case used to test the apply/destroy +// lifecycle of a resource in a specific configuration. +// +// When the destroy plan is executed, the config from the last TestStep +// is used to plan it. +type TestCase struct { + // IsUnitTest allows a test to run regardless of the TF_ACC + // environment variable. This should be used with care - only for + // fast tests on local resources (e.g. remote state with a local + // backend) but can be used to increase confidence in correct + // operation of Terraform without waiting for a full acctest run. + IsUnitTest bool + + // PreCheck, if non-nil, will be called before any test steps are + // executed. It will only be executed in the case that the steps + // would run, so it can be used for some validation before running + // acceptance tests, such as verifying that keys are setup. + PreCheck func() + + // ProviderFactories can be specified for the providers that are valid. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... + // } + ProviderFactories map[string]func() (*schema.Provider, error) + + // ProtoV5ProviderFactories serves the same purpose as ProviderFactories, + // but for protocol v5 providers defined using the terraform-plugin-go + // ProviderServer interface. + ProtoV5ProviderFactories map[string]func() (tfprotov5.ProviderServer, error) + + // Providers is the ResourceProvider that will be under test. + // + // Deprecated: Providers is deprecated, please use ProviderFactories + Providers map[string]*schema.Provider + + // ExternalProviders are providers the TestCase relies on that should + // be downloaded from the registry during init. This is only really + // necessary to set if you're using import, as providers in your config + // will be automatically retrieved during init. Import doesn't use a + // config, however, so we allow manually specifying them here to be + // downloaded for import tests. + ExternalProviders map[string]ExternalProvider + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // ErrorCheck allows providers the option to handle errors such as skipping + // tests based on certain errors. + ErrorCheck ErrorCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // The settings below control the "ID-only refresh test." This is + // an enabled-by-default test that tests that a refresh can be + // refreshed with only an ID to result in the same attributes. + // This validates completeness of Refresh. + // + // IDRefreshName is the name of the resource to check. 
This will + // default to the first non-nil primary resource in the state. + // + // IDRefreshIgnore is a list of configuration keys that will be ignored. + IDRefreshName string + IDRefreshIgnore []string +} + +// ExternalProvider holds information about third-party providers that should +// be downloaded by Terraform as part of running the test step. +type ExternalProvider struct { + VersionConstraint string // the version constraint for the provider + Source string // the provider source +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simply create/destroy +// tests will only need one step. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. + PreConfig func() + + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do. Ideally we would've + // used Go interfaces here but there are now hundreds of tests we don't + // want to re-type so instead we just determine which step logic + // to run based on what settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config a string of the configuration to give to Terraform. If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true. + Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. + ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. 
This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. Note that this checks for strict equality + // and does not respect DiffSuppressFunc or CustomizeDiff. + // + // ImportStateVerifyIgnore is a list of prefixes of fields that should + // not be verified to be equal. These can be set to ephemeral fields or + // fields that can't be refreshed and don't matter. + ImportStateVerify bool + ImportStateVerifyIgnore []string +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t testing.T, c TestCase) { + t.Helper() + t.Parallel() + Test(t, c) +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environmental variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// long, we require the verbose flag so users are able to see progress +// output. 
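// Editor's note: illustrative sketch, not part of the vendored diff. It shows how
// the TestCase and TestStep types documented above are typically combined with the
// Test helper in a provider's acceptance test (imports of this helper/resource
// package, helper/schema and "testing" are assumed). Provider(), testAccPreCheck,
// the "alks_iamrole" resource and its attributes are hypothetical stand-ins for
// whatever the provider under test defines; the harness only runs when TF_ACC is set.
func TestAccAlksIamRoleExample(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck: func() { testAccPreCheck(t) }, // hypothetical pre-flight checks
		ProviderFactories: map[string]func() (*schema.Provider, error){
			"alks": func() (*schema.Provider, error) { return Provider(), nil }, // hypothetical provider constructor
		},
		Steps: []resource.TestStep{
			{
				Config: `resource "alks_iamrole" "r" {
					name = "TF-ACC-EXAMPLE"
					type = "Amazon EC2"
				}`,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("alks_iamrole.r", "name", "TF-ACC-EXAMPLE"),
					resource.TestCheckResourceAttrSet("alks_iamrole.r", "arn"),
				),
			},
		},
	})
}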
+func Test(t testing.T, c TestCase) { + t.Helper() + + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. You can opt out + // of this with OverrideEnvVar on individual TestCases. + if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", + TestEnvVar)) + return + } + + logging.SetOutput(t) + + // Copy any explicitly passed providers to factories, this is for backwards compatibility. + if len(c.Providers) > 0 { + c.ProviderFactories = map[string]func() (*schema.Provider, error){} + + for name, p := range c.Providers { + if _, ok := c.ProviderFactories[name]; ok { + t.Fatalf("ProviderFactory for %q already exists, cannot overwrite with Provider", name) + } + prov := p + c.ProviderFactories[name] = func() (*schema.Provider, error) { + return prov, nil + } + } + } + + // Run the PreCheck if we have it. + // This is done after the auto-configure to allow providers + // to override the default auto-configure parameters. + if c.PreCheck != nil { + c.PreCheck() + } + + sourceDir, err := os.Getwd() + if err != nil { + t.Fatalf("Error getting working dir: %s", err) + } + helper := plugintest.AutoInitProviderHelper(sourceDir) + defer func(helper *plugintest.Helper) { + err := helper.Close() + if err != nil { + log.Printf("Error cleaning up temporary test files: %s", err) + } + }(helper) + + runNewTest(t, c, helper) +} + +// testProviderConfig takes the list of Providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func testProviderConfig(c TestCase) (string, error) { + var lines []string + var requiredProviders []string + for p := range c.Providers { + lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) + } + for p, v := range c.ExternalProviders { + if _, ok := c.Providers[p]; ok { + return "", fmt.Errorf("Provider %q set in both Providers and ExternalProviders for TestCase. Must be set in only one.", p) + } + if _, ok := c.ProviderFactories[p]; ok { + return "", fmt.Errorf("Provider %q set in both ProviderFactories and ExternalProviders for TestCase. Must be set in only one.", p) + } + lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) + var providerBlock string + if v.VersionConstraint != "" { + providerBlock = fmt.Sprintf("%s\nversion = %q", providerBlock, v.VersionConstraint) + } + if v.Source != "" { + providerBlock = fmt.Sprintf("%s\nsource = %q", providerBlock, v.Source) + } + if providerBlock != "" { + providerBlock = fmt.Sprintf("%s = {%s\n}\n", p, providerBlock) + } + requiredProviders = append(requiredProviders, providerBlock) + } + + if len(requiredProviders) > 0 { + lines = append([]string{fmt.Sprintf("terraform {\nrequired_providers {\n%s}\n}\n\n", strings.Join(requiredProviders, ""))}, lines...) + } + + return strings.Join(lines, ""), nil +} + +// UnitTest is a helper to force the acceptance testing harness to run in the +// normal unit test suite. This should only be used for resource that don't +// have any external dependencies. 
+func UnitTest(t testing.T, c TestCase) { + t.Helper() + + c.IsUnitTest = true + Test(t, c) +} + +func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { + if c.ResourceName == "" { + return nil, fmt.Errorf("ResourceName must be set in TestStep") + } + + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.ResourceName]; ok { + return v, nil + } + } + } + + return nil, fmt.Errorf( + "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) +} + +// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + for i, f := range fs { + if err := f(s); err != nil { + return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) + } + } + + return nil + } +} + +// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +// +// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the +// TestCheckFuncs and aggregates failures. +func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + var result *multierror.Error + + for i, f := range fs { + if err := f(s); err != nil { + result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) + } + } + + return result.ErrorOrNil() + } +} + +// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value +// exists in state for the given name/key combination. It is useful when +// testing that computed values were set, when it is not possible to +// know ahead of time what the values will be. +func TestCheckResourceAttrSet(name, key string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + }) +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + }) +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) + } + + return nil +} + +// TestCheckResourceAttr is a TestCheckFunc which validates +// the value in state for the given name/key combination. 
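// Editor's note: illustrative sketch, not part of the vendored diff. Unlike
// ComposeTestCheckFunc (defined above), ComposeAggregateTestCheckFunc runs every
// check and reports all failures together, which helps when several attributes
// drift at once. The resource address and attribute names are hypothetical.
var exampleAggregateCheck = resource.ComposeAggregateTestCheckFunc(
	resource.TestCheckResourceAttr("alks_iamrole.r", "include_default_policies", "false"),
	resource.TestCheckResourceAttr("alks_iamrole.r", "enable_alks_access", "true"),
	resource.TestCheckResourceAttrSet("alks_iamrole.r", "role_added_to_ip"),
)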
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + }) +} + +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + }) +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + +// TestCheckNoResourceAttr is a TestCheckFunc which ensures that +// NO value exists in state for the given name/key combination. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + }) +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + }) +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". + emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { + return nil + } + + if exists { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil +} + +// TestMatchResourceAttr is a TestCheckFunc which checks that the value +// in state for the given name/key combination matches the given regex. 
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + }) +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + }) +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil +} + +// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the +// value is a pointer so that it can be updated while the test is running. +// It will only be dereferenced at the point this step is run. +func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckResourceAttr(name, key, *value)(s) + } +} + +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + +// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values +// in state for a pair of name/key combinations are equal. 
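// Editor's note: illustrative sketch, not part of the vendored diff.
// TestMatchResourceAttr (defined above) suits computed values whose shape is
// predictable but whose exact value is not, such as ARNs. The resource address
// and pattern are hypothetical; "regexp" is the standard library.
var exampleArnCheck = resource.TestMatchResourceAttr(
	"alks_iamrole.r", "arn",
	regexp.MustCompile(`^arn:aws:iam::\d{12}:role/.+$`),
)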
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + }) +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err + } + + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + }) +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + if nameFirst == nameSecond && keyFirst == keySecond { + return fmt.Errorf( + "comparing self: resource %s attribute %s", + nameFirst, + keyFirst, + ) + } + + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. 
+ return nil + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) + } + + return is, nil +} + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. 
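The check helpers above are normally composed inside a `resource.TestStep`. Below is a minimal sketch of how such a step could look for this provider; the HCL fixture, the `Provider()` constructor, and the `alks_iamrole` attribute names are assumptions for illustration, not taken from the SDK or guaranteed by this diff.

```go
package main

import (
	"regexp"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Hypothetical fixture; real acceptance tests keep their HCL in *_test.go files.
const testAccRoleConfig = `
resource "alks_iamrole" "test" {
  name                     = "TEST-DELETE"
  type                     = "Amazon EC2"
  include_default_policies = false
}
`

func TestAccAlksIamRole_basicChecks(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProviderFactories: map[string]func() (*schema.Provider, error){
			// Provider() is assumed to be this provider's constructor.
			"alks": func() (*schema.Provider, error) { return Provider(), nil },
		},
		Steps: []resource.TestStep{
			{
				Config: testAccRoleConfig,
				Check: resource.ComposeTestCheckFunc(
					// exact match against a flatmap attribute in the primary instance state
					resource.TestCheckResourceAttr("alks_iamrole.test", "name", "TEST-DELETE"),
					// regexp match, useful for server-generated values such as ARNs
					resource.TestMatchResourceAttr("alks_iamrole.test", "arn",
						regexp.MustCompile(`^arn:aws:iam::\d{12}:role/`)),
					// compare two state values to each other instead of to a literal
					resource.TestCheckResourceAttrPair("alks_iamrole.test", "name", "alks_iamrole.test", "id"),
				),
			},
		},
	})
}
```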
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} + +// indexesIntoTypeSet is a heuristic to try and identify if a flatmap style +// string address uses a precalculated TypeSet hash, which are integers and +// typically are large and obviously not a list index +func indexesIntoTypeSet(key string) bool { + for _, part := range strings.Split(key, ".") { + if i, err := strconv.Atoi(part); err == nil && i > 100 { + return true + } + } + return false +} + +func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + err := f(s) + if err != nil && s.IsBinaryDrivenTest && indexesIntoTypeSet(key) { + return fmt.Errorf("Error in test check: %s\nTest check address %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, key) + } + return err + } +} + +func checkIfIndexesIntoTypeSetPair(keyFirst, keySecond string, f TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + err := f(s) + if err != nil && s.IsBinaryDrivenTest && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) { + return fmt.Errorf("Error in test check: %s\nTest check address %q or %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, keyFirst, keySecond) + } + return err + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go new file mode 100644 index 00000000..1e39294f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go @@ -0,0 +1,25 @@ +package resource + +import ( + "errors" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go new file mode 100644 index 00000000..c9d90dd2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -0,0 +1,268 @@ +package resource + +import ( + "fmt" + "log" + "reflect" + "strings" + + "github.com/davecgh/go-spew/spew" + tfjson "github.com/hashicorp/terraform-json" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func runPostTestDestroy(t testing.T, c TestCase, wd *plugintest.WorkingDir, factories map[string]func() (*schema.Provider, error), v5factories map[string]func() (tfprotov5.ProviderServer, error), statePreDestroy *terraform.State) error { + t.Helper() + + err := runProviderCommand(t, func() error { + return wd.Destroy() + }, wd, factories, v5factories) + if err != nil { + return err + } + + if c.CheckDestroy != nil { + if err := c.CheckDestroy(statePreDestroy); err != nil 
{ + return err + } + } + + return nil +} + +func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { + t.Helper() + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + wd := helper.RequireNewWorkingDir(t) + + defer func() { + var statePreDestroy *terraform.State + var err error + err = runProviderCommand(t, func() error { + statePreDestroy, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + t.Fatalf("Error retrieving state, there may be dangling resources: %s", err.Error()) + return + } + + if !stateIsEmpty(statePreDestroy) { + err := runPostTestDestroy(t, c, wd, c.ProviderFactories, c.ProtoV5ProviderFactories, statePreDestroy) + if err != nil { + t.Fatalf("Error running post-test destroy, there may be dangling resources: %s", err.Error()) + } + } + + wd.Close() + }() + + providerCfg, err := testProviderConfig(c) + if err != nil { + t.Fatal(err) + } + + err = wd.SetConfig(providerCfg) + if err != nil { + t.Fatalf("Error setting test config: %s", err) + } + err = runProviderCommand(t, func() error { + return wd.Init() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + t.Fatalf("Error running init: %s", err.Error()) + return + } + + // use this to track last step succesfully applied + // acts as default for import tests + var appliedCfg string + + for i, step := range c.Steps { + if step.PreConfig != nil { + step.PreConfig() + } + + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d/%d", i+1, len(c.Steps)) + continue + } + } + + if step.ImportState { + err := testStepNewImportState(t, c, helper, wd, step, appliedCfg) + if step.ExpectError != nil { + if err == nil { + t.Fatalf("Step %d/%d error running import: expected an error but got none", i+1, len(c.Steps)) + } + if !step.ExpectError.MatchString(err.Error()) { + t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", i+1, len(c.Steps), step.ExpectError.String(), err) + } + } else { + if err != nil && c.ErrorCheck != nil { + err = c.ErrorCheck(err) + } + if err != nil { + t.Fatalf("Step %d/%d error running import: %s", i+1, len(c.Steps), err) + } + } + continue + } + + if step.Config != "" { + err := testStepNewConfig(t, c, wd, step) + if step.ExpectError != nil { + if err == nil { + t.Fatalf("Step %d/%d, expected an error but got none", i+1, len(c.Steps)) + } + if !step.ExpectError.MatchString(err.Error()) { + t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err) + } + } else { + if err != nil && c.ErrorCheck != nil { + err = c.ErrorCheck(err) + } + if err != nil { + t.Fatalf("Step %d/%d error: %s", i+1, len(c.Steps), err) + } + } + appliedCfg = step.Config + continue + } + + t.Fatal("Unsupported test mode") + } +} + +func getState(t testing.T, wd *plugintest.WorkingDir) (*terraform.State, error) { + t.Helper() + + jsonState, err := wd.State() + if err != nil { + return nil, err + } + state, err := shimStateFromJson(jsonState) + if err != nil { + t.Fatal(err) + } + return state, nil +} + +func stateIsEmpty(state *terraform.State) bool { + return state.Empty() || !state.HasResources() +} + +func planIsEmpty(plan *tfjson.Plan) bool { + for _, rc := range plan.ResourceChanges { + for _, a := range rc.Change.Actions { + if a != tfjson.ActionNoop { + return false + } + } + } + return true +} + +func testIDRefresh(c 
TestCase, t testing.T, wd *plugintest.WorkingDir, step TestStep, r *terraform.ResourceState) error { + t.Helper() + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + + // Build the state. The state is just the resource with an ID. There + // are no attributes. We only set what is needed to perform a refresh. + state := terraform.NewState() + state.RootModule().Resources = make(map[string]*terraform.ResourceState) + state.RootModule().Resources[c.IDRefreshName] = &terraform.ResourceState{} + + // Temporarily set the config to a minimal provider config for the refresh + // test. After the refresh we can reset it. + cfg, err := testProviderConfig(c) + if err != nil { + return err + } + err = wd.SetConfig(cfg) + if err != nil { + t.Fatalf("Error setting import test config: %s", err) + } + defer func() { + err = wd.SetConfig(step.Config) + if err != nil { + t.Fatalf("Error resetting test config: %s", err) + } + }() + + // Refresh! + err = runProviderCommand(t, func() error { + err = wd.Refresh() + if err != nil { + t.Fatalf("Error running terraform refresh: %s", err) + } + state, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return err + } + + // Verify attribute equivalence. + actualR := state.RootModule().Resources[c.IDRefreshName] + if actualR == nil { + return fmt.Errorf("Resource gone!") + } + if actualR.Primary == nil { + return fmt.Errorf("Resource has no primary instance") + } + actual := actualR.Primary.Attributes + expected := r.Primary.Attributes + // Remove fields we're ignoring + for _, v := range c.IDRefreshIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return fmt.Errorf( + "Attributes not equivalent. Difference is shown below. 
Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go new file mode 100644 index 00000000..0e218f60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -0,0 +1,244 @@ +package resource + +import ( + "errors" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step TestStep) error { + t.Helper() + + var idRefreshCheck *terraform.ResourceState + idRefresh := c.IDRefreshName != "" + + if !step.Destroy { + var state *terraform.State + var err error + err = runProviderCommand(t, func() error { + state, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return err + } + if err := testStepTaint(state, step); err != nil { + return fmt.Errorf("Error when tainting resources: %s", err) + } + } + + err := wd.SetConfig(step.Config) + if err != nil { + return fmt.Errorf("Error setting config: %w", err) + } + + // require a refresh before applying + // failing to do this will result in data sources not being updated + err = runProviderCommand(t, func() error { + return wd.Refresh() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error running pre-apply refresh: %w", err) + } + + // If this step is a PlanOnly step, skip over this first Plan and + // subsequent Apply, and use the follow-up Plan that checks for + // permadiffs + if !step.PlanOnly { + // Plan! 
+ err := runProviderCommand(t, func() error { + if step.Destroy { + return wd.CreateDestroyPlan() + } + return wd.CreatePlan() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error running pre-apply plan: %w", err) + } + + // We need to keep a copy of the state prior to destroying such + // that the destroy steps can verify their behavior in the + // check function + var stateBeforeApplication *terraform.State + err = runProviderCommand(t, func() error { + stateBeforeApplication, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error retrieving pre-apply state: %w", err) + } + + // Apply the diff, creating real resources + err = runProviderCommand(t, func() error { + return wd.Apply() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + if step.Destroy { + return fmt.Errorf("Error running destroy: %w", err) + } + return fmt.Errorf("Error running apply: %w", err) + } + + // Get the new state + var state *terraform.State + err = runProviderCommand(t, func() error { + state, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error retrieving state after apply: %w", err) + } + + // Run any configured checks + if step.Check != nil { + state.IsBinaryDrivenTest = true + if step.Destroy { + if err := step.Check(stateBeforeApplication); err != nil { + return fmt.Errorf("Check failed: %w", err) + } + } else { + if err := step.Check(state); err != nil { + return fmt.Errorf("Check failed: %w", err) + } + } + } + } + + // Test for perpetual diffs by performing a plan, a refresh, and another plan + + // do a plan + err = runProviderCommand(t, func() error { + if step.Destroy { + return wd.CreateDestroyPlan() + } + return wd.CreatePlan() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error running post-apply plan: %w", err) + } + + var plan *tfjson.Plan + err = runProviderCommand(t, func() error { + var err error + plan, err = wd.SavedPlan() + return err + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error retrieving post-apply plan: %w", err) + } + + if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { + var stdout string + err = runProviderCommand(t, func() error { + var err error + stdout, err = wd.SavedPlanRawStdout() + return err + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error retrieving formatted plan output: %w", err) + } + return fmt.Errorf("After applying this test step, the plan was not empty.\nstdout:\n\n%s", stdout) + } + + // do a refresh + if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { + err := runProviderCommand(t, func() error { + return wd.Refresh() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error running post-apply refresh: %w", err) + } + } + + // do another plan + err = runProviderCommand(t, func() error { + if step.Destroy { + return wd.CreateDestroyPlan() + } + return wd.CreatePlan() + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error running second post-apply plan: %w", err) + } + + err = runProviderCommand(t, func() error { + var err error + plan, err = wd.SavedPlan() + return err + }, wd, 
c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error retrieving second post-apply plan: %w", err) + } + + // check if plan is empty + if !planIsEmpty(plan) && !step.ExpectNonEmptyPlan { + var stdout string + err = runProviderCommand(t, func() error { + var err error + stdout, err = wd.SavedPlanRawStdout() + return err + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return fmt.Errorf("Error retrieving formatted second plan output: %w", err) + } + return fmt.Errorf("After applying this test step and performing a `terraform refresh`, the plan was not empty.\nstdout\n\n%s", stdout) + } else if step.ExpectNonEmptyPlan && planIsEmpty(plan) { + return errors.New("Expected a non-empty plan, but got an empty plan") + } + + // ID-ONLY REFRESH + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + var state *terraform.State + err = runProviderCommand(t, func() error { + state, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return err + } + if idRefresh && idRefreshCheck == nil && !state.Empty() { + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. + if idRefreshCheck != nil { + if err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil { + return fmt.Errorf( + "[ERROR] Test: ID-only test failed: %s", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go new file mode 100644 index 00000000..52487ad8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -0,0 +1,221 @@ +package resource + +import ( + "reflect" + "strings" + + "github.com/davecgh/go-spew/spew" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewImportState(t testing.T, c TestCase, helper *plugintest.Helper, wd *plugintest.WorkingDir, step TestStep, cfg string) error { + t.Helper() + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + + if step.ResourceName == "" { + t.Fatal("ResourceName is required for an import state test") + } + + // get state from check sequence + var state *terraform.State + var err error + err = runProviderCommand(t, func() error { + state, err = getState(t, wd) + if err != nil { + return err + } + return nil + }, wd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } + + // Determine the ID to import + var importId string + switch { + case step.ImportStateIdFunc != nil: + var err error + importId, err = step.ImportStateIdFunc(state) + if err != nil { + t.Fatal(err) + } + case step.ImportStateId != "": + importId = 
step.ImportStateId + default: + resource, err := testResource(step, state) + if err != nil { + t.Fatal(err) + } + importId = resource.Primary.ID + } + importId = step.ImportStateIdPrefix + importId + + // Create working directory for import tests + if step.Config == "" { + step.Config = cfg + if step.Config == "" { + t.Fatal("Cannot import state with no specified config") + } + } + importWd := helper.RequireNewWorkingDir(t) + defer importWd.Close() + err = importWd.SetConfig(step.Config) + if err != nil { + t.Fatalf("Error setting test config: %s", err) + } + + err = runProviderCommand(t, func() error { + return importWd.Init() + }, importWd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + t.Fatalf("Error running init: %s", err) + } + + err = runProviderCommand(t, func() error { + return importWd.Import(step.ResourceName, importId) + }, importWd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + return err + } + + var importState *terraform.State + err = runProviderCommand(t, func() error { + importState, err = getState(t, importWd) + if err != nil { + return err + } + return nil + }, importWd, c.ProviderFactories, c.ProtoV5ProviderFactories) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } + + // Go through the imported state and verify + if step.ImportStateCheck != nil { + var states []*terraform.InstanceState + for _, r := range importState.RootModule().Resources { + if r.Primary != nil { + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) + } + } + if err := step.ImportStateCheck(states); err != nil { + t.Fatal(err) + } + } + + // Verify that all the states match + if step.ImportStateVerify { + new := importState.RootModule().Resources + old := state.RootModule().Resources + + for _, r := range new { + // Find the existing resource + var oldR *terraform.ResourceState + for r2Key, r2 := range old { + // Ensure that we do not match against data sources as they + // cannot be imported and are not what we want to verify. + // Mode is not present in ResourceState so we use the + // stringified ResourceStateKey for comparison. + if strings.HasPrefix(r2Key, "data.") { + continue + } + + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type && r2.Provider == r.Provider { + oldR = r2 + break + } + } + if oldR == nil { + t.Fatalf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + actual[k] = v + } + + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + // timeouts are only _sometimes_ added to state. To + // account for this, just don't compare timeouts at + // all. 
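The import flow above is driven by a `TestStep` whose `ImportState*` fields are set. A sketch of the usual two-step pattern follows; the fixture name reuses the hypothetical config from earlier, and the ignored attribute is an assumption about a field that would not round-trip through import.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// importSteps: apply a configuration, then re-import the same resource into a
// fresh working directory and compare the two attribute maps.
var importSteps = []resource.TestStep{
	{
		Config: testAccRoleConfig, // hypothetical HCL fixture
	},
	{
		ResourceName:      "alks_iamrole.test",
		ImportState:       true,
		ImportStateVerify: true,
		// attributes that can never be read back on import are excluded from
		// the comparison performed by testStepNewImportState
		ImportStateVerifyIgnore: []string{"include_default_policies"},
	},
}
```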
+ for k := range actual { + if strings.HasPrefix(k, "timeouts.") { + delete(actual, k) + } + if k == "timeouts" { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, "timeouts.") { + delete(expected, k) + } + if k == "timeouts" { + delete(expected, k) + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + t.Fatalf( + "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go new file mode 100644 index 00000000..ada1d6e7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go @@ -0,0 +1,254 @@ +// These test helpers were developed by the AWS provider team at HashiCorp. + +package resource + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const ( + sentinelIndex = "*" +) + +// TestCheckTypeSetElemNestedAttrs is a TestCheckFunc that accepts a resource +// name, an attribute path, which should use the sentinel value '*' for indexing +// into a TypeSet. The function verifies that an element matches the whole value +// map. +// +// You may check for unset keys, however this will also match keys set to empty +// string. Please provide a map with at least 1 non-empty value. +// +// map[string]string{ +// "key1": "value", +// "key2": "", +// } +// +// Use this function over SDK provided TestCheckFunctions when validating a +// TypeSet where its elements are a nested object with their own attrs/values. +// +// Please note, if the provided value map is not granular enough, there exists +// the possibility you match an element you were not intending to, in the TypeSet. +// Provide a full mapping of attributes to be sure the unique element exists. +func TestCheckTypeSetElemNestedAttrs(name, attr string, values map[string]string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + attrParts := strings.Split(attr, ".") + if attrParts[len(attrParts)-1] != sentinelIndex { + return fmt.Errorf("%q does not end with the special value %q", attr, sentinelIndex) + } + // account for cases where the user is trying to see if the value is unset/empty + // there may be ambiguous scenarios where a field was deliberately unset vs set + // to the empty string, this will match both, which may be a false positive. + var matchCount int + for _, v := range values { + if v != "" { + matchCount++ + } + } + if matchCount == 0 { + return fmt.Errorf("%#v has no non-empty values", values) + } + + if testCheckTypeSetElemNestedAttrsInState(is, attrParts, matchCount, values) { + return nil + } + return fmt.Errorf("%q no TypeSet element %q, with nested attrs %#v in state: %#v", name, attr, values, is.Attributes) + } +} + +// TestMatchTypeSetElemNestedAttrs is a TestCheckFunc similar to TestCheckTypeSetElemNestedAttrs +// with the exception that it verifies that an element matches a *regexp.Regexp. +// +// You may check for unset keys, however this will also match keys set to empty +// string. 
Please provide a map with at least 1 non-empty value e.g. +// +// map[string]*regexp.Regexp{ +// "key1": regexp.MustCompile("value"), +// "key2": regexp.MustCompile(""), +// } +// +func TestMatchTypeSetElemNestedAttrs(name, attr string, values map[string]*regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + attrParts := strings.Split(attr, ".") + if attrParts[len(attrParts)-1] != sentinelIndex { + return fmt.Errorf("%q does not end with the special value %q", attr, sentinelIndex) + } + // account for cases where the user is trying to see if the value is unset/empty + // there may be ambiguous scenarios where a field was deliberately unset vs set + // to the empty string, this will match both, which may be a false positive. + var matchCount int + for _, v := range values { + if v != nil { + matchCount++ + } + } + if matchCount == 0 { + return fmt.Errorf("%#v has no non-empty values", values) + } + + if testCheckTypeSetElemNestedAttrsInState(is, attrParts, matchCount, values) { + return nil + } + return fmt.Errorf("%q no TypeSet element %q, with the regex provided, match in state: %#v", name, attr, is.Attributes) + } +} + +// TestCheckTypeSetElemAttr is a TestCheckFunc that accepts a resource +// name, an attribute path, which should use the sentinel value '*' for indexing +// into a TypeSet. The function verifies that an element matches the provided +// value. +// +// Use this function over SDK provided TestCheckFunctions when validating a +// TypeSet where its elements are a simple value +func TestCheckTypeSetElemAttr(name, attr, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + err = testCheckTypeSetElem(is, attr, value) + if err != nil { + return fmt.Errorf("%q error: %s", name, err) + } + + return nil + } +} + +// TestCheckTypeSetElemAttrPair is a TestCheckFunc that verifies a pair of name/key +// combinations are equal where the first uses the sentinel value to index into a +// TypeSet. 
+// +// E.g., TestCheckTypeSetElemAttrPair("aws_autoscaling_group.bar", "availability_zones.*", "data.aws_availability_zones.available", "names.0") +// E.g., TestCheckTypeSetElemAttrPair("aws_spot_fleet_request.bar", "launch_specification.*.instance_type", "data.data.aws_ec2_instance_type_offering.available", "instance_type") +func TestCheckTypeSetElemAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + vSecond, okSecond := isSecond.Attributes[keySecond] + if !okSecond { + return fmt.Errorf("%s: Attribute %q not set, cannot be checked against TypeSet", nameSecond, keySecond) + } + + return testCheckTypeSetElemPair(isFirst, keyFirst, vSecond) + } +} + +func testCheckTypeSetElem(is *terraform.InstanceState, attr, value string) error { + attrParts := strings.Split(attr, ".") + if attrParts[len(attrParts)-1] != sentinelIndex { + return fmt.Errorf("%q does not end with the special value %q", attr, sentinelIndex) + } + for stateKey, stateValue := range is.Attributes { + if stateValue == value { + stateKeyParts := strings.Split(stateKey, ".") + if len(stateKeyParts) == len(attrParts) { + for i := range attrParts { + if attrParts[i] != stateKeyParts[i] && attrParts[i] != sentinelIndex { + break + } + if i == len(attrParts)-1 { + return nil + } + } + } + } + } + + return fmt.Errorf("no TypeSet element %q, with value %q in state: %#v", attr, value, is.Attributes) +} + +func testCheckTypeSetElemPair(is *terraform.InstanceState, attr, value string) error { + attrParts := strings.Split(attr, ".") + for stateKey, stateValue := range is.Attributes { + if stateValue == value { + stateKeyParts := strings.Split(stateKey, ".") + if len(stateKeyParts) == len(attrParts) { + for i := range attrParts { + if attrParts[i] != stateKeyParts[i] && attrParts[i] != sentinelIndex { + break + } + if i == len(attrParts)-1 { + return nil + } + } + } + } + } + + return fmt.Errorf("no TypeSet element %q, with value %q in state: %#v", attr, value, is.Attributes) +} + +// testCheckTypeSetElemNestedAttrsInState is a helper function +// to determine if nested attributes and their values are equal to those +// in the instance state. Currently, the function accepts a "values" param of type +// map[string]string or map[string]*regexp.Regexp. +// Returns true if all attributes match, else false. 
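A short sketch of how the set-aware helpers above are called: the `*` sentinel stands in for the unknowable TypeSet hash, so a check passes when any element matches. The resource addresses are the AWS-style placeholders from the doc comments, not ALKS resources.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

var setChecks = resource.ComposeTestCheckFunc(
	// at least one element of the set equals the literal value
	resource.TestCheckTypeSetElemAttr("aws_autoscaling_group.bar", "availability_zones.*", "us-east-1a"),
	// at least one nested object in the set carries this exact combination of attrs
	resource.TestCheckTypeSetElemNestedAttrs("aws_instance.bar", "ebs_block_device.*", map[string]string{
		"device_name": "/dev/sdb",
		"encrypted":   "true",
	}),
	// the set on the first address contains the value held at the second address
	resource.TestCheckTypeSetElemAttrPair("aws_autoscaling_group.bar", "availability_zones.*",
		"data.aws_availability_zones.available", "names.0"),
)
```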
+func testCheckTypeSetElemNestedAttrsInState(is *terraform.InstanceState, attrParts []string, matchCount int, values interface{}) bool { + matches := make(map[string]int) + + for stateKey, stateValue := range is.Attributes { + stateKeyParts := strings.Split(stateKey, ".") + // a Set/List item with nested attrs would have a flatmap address of + // at least length 3 + // foo.0.name = "bar" + if len(stateKeyParts) < 3 || len(attrParts) > len(stateKeyParts) { + continue + } + var pathMatch bool + for i := range attrParts { + if attrParts[i] != stateKeyParts[i] && attrParts[i] != sentinelIndex { + break + } + if i == len(attrParts)-1 { + pathMatch = true + } + } + if !pathMatch { + continue + } + id := stateKeyParts[len(attrParts)-1] + nestedAttr := strings.Join(stateKeyParts[len(attrParts):], ".") + + var match bool + switch t := values.(type) { + case map[string]string: + if v, keyExists := t[nestedAttr]; keyExists && v == stateValue { + match = true + } + case map[string]*regexp.Regexp: + if v, keyExists := t[nestedAttr]; keyExists && v != nil && v.MatchString(stateValue) { + match = true + } + } + if match { + matches[id] = matches[id] + 1 + if matches[id] == matchCount { + return true + } + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go new file mode 100644 index 00000000..6b7bdae8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go @@ -0,0 +1,113 @@ +package resource + +import ( + "context" + "errors" + "sync" + "time" +) + +// RetryContext is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Cancellation from the passed in context will propagate through to the +// underlying StateChangeConf +func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForStateContext(ctx) + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Deprecated: Please use RetryContext to ensure proper plugin shutdown +func Retry(timeout time.Duration, f RetryFunc) error { + return RetryContext(context.Background(), timeout, f) +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. 
It forces client code +// to choose whether or not a given error is retryable. +type RetryError struct { + Err error + Retryable bool +} + +func (e *RetryError) Unwrap() error { + return e.Err +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. To prevent logic errors, will return an error when passed a +// nil error. +func RetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. +func NonRetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/README.md rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/context.go new file mode 100644 index 00000000..ed8d0964 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/context.go @@ -0,0 +1,7 @@ +package schema + +type Key string + +var ( + StopContextKey = Key("StopContext") +) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go similarity index 79% rename from vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go index 875677eb..bb1fc1db 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go @@ -3,8 +3,41 @@ package schema import ( "fmt" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// StringKind represents the format a string is in. +type StringKind configschema.StringKind + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain = StringKind(configschema.StringPlain) + + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown = StringKind(configschema.StringMarkdown) +) + +var ( + // DescriptionKind is the default StringKind of descriptions in this provider. + // It defaults to StringPlain but can be globally switched to StringMarkdown. 
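The retry helpers defined in wait.go above are intended to be called from resource CRUD functions to wait out eventual consistency. A minimal sketch, assuming a hypothetical `lookupRole` client call (not part of the SDK or necessarily of this provider):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// lookupRole stands in for an ALKS client call; purely illustrative.
func lookupRole(meta interface{}, id string) (bool, error) { return true, nil }

func waitForRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	err := resource.RetryContext(ctx, 2*time.Minute, func() *resource.RetryError {
		found, err := lookupRole(meta, d.Id())
		if err != nil {
			// hard failure: stop polling immediately
			return resource.NonRetryableError(err)
		}
		if !found {
			// transient condition: keep polling until the timeout elapses
			return resource.RetryableError(fmt.Errorf("role %q not visible yet", d.Id()))
		}
		return nil
	})
	if err != nil {
		return diag.FromErr(err)
	}
	return nil
}
```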
+ DescriptionKind = StringPlain + + // SchemaDescriptionBuilder converts helper/schema.Schema Descriptions to configschema.Attribute + // and Block Descriptions. This method can be used to modify the description text prior to it + // being returned in the schema. + SchemaDescriptionBuilder = func(s *Schema) string { + return s.Description + } + + // ResourceDescriptionBuilder converts helper/schema.Resource Descriptions to configschema.Block + // Descriptions at the resource top level. This method can be used to modify the description prior + // to it being returned in the schema. + ResourceDescriptionBuilder = func(r *Resource) string { + return r.Description + } ) // The functions and methods in this file are concerned with the conversion @@ -115,13 +148,22 @@ func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { } } + desc := SchemaDescriptionBuilder(s) + descKind := configschema.StringKind(DescriptionKind) + if desc == "" { + // fallback to plain text if empty + descKind = configschema.StringPlain + } + return &configschema.Attribute{ - Type: s.coreConfigSchemaType(), - Optional: opt, - Required: reqd, - Computed: s.Computed, - Sensitive: s.Sensitive, - Description: s.Description, + Type: s.coreConfigSchemaType(), + Optional: opt, + Required: reqd, + Computed: s.Computed, + Sensitive: s.Sensitive, + Description: desc, + DescriptionKind: descKind, + Deprecated: s.Deprecated != "", } } @@ -132,6 +174,17 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { ret := &configschema.NestedBlock{} if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil { ret.Block = *nested + + desc := SchemaDescriptionBuilder(s) + descKind := configschema.StringKind(DescriptionKind) + if desc == "" { + // fallback to plain text if empty + descKind = configschema.StringPlain + } + // set these on the block from the attribute Schema + ret.Block.Description = desc + ret.Block.DescriptionKind = descKind + ret.Block.Deprecated = s.Deprecated != "" } switch s.Type { case TypeList: @@ -231,6 +284,18 @@ func (s *Schema) coreConfigSchemaType() cty.Type { func (r *Resource) CoreConfigSchema() *configschema.Block { block := r.coreConfigSchema() + desc := ResourceDescriptionBuilder(r) + descKind := configschema.StringKind(DescriptionKind) + if desc == "" { + // fallback to plain text if empty + descKind = configschema.StringPlain + } + + // Only apply Resource Description, Kind, Deprecation at top level + block.Description = desc + block.DescriptionKind = descKind + block.Deprecated = r.DeprecationMessage != "" + if block.Attributes == nil { block.Attributes = map[string]*configschema.Attribute{} } @@ -301,9 +366,3 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { func (r *Resource) coreConfigSchema() *configschema.Block { return schemaMap(r.Schema).CoreConfigSchema() } - -// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema -// on the backends's schema. 
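Providers opt into markdown descriptions through the package-level `DescriptionKind` introduced above. A one-line sketch of the usual opt-in; whether this provider actually does so is not shown in this diff.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func init() {
	// Render all schema descriptions as markdown when Terraform asks for the
	// provider schema; plain-text descriptions remain valid markdown.
	schema.DescriptionKind = schema.StringMarkdown
}
```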
-func (r *Backend) CoreConfigSchema() *configschema.Block { - return schemaMap(r.Schema).CoreConfigSchema() -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/equal.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go similarity index 96% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go index 2a66a068..c7721bcd 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go @@ -3,7 +3,6 @@ package schema import ( "fmt" "strconv" - "strings" ) // FieldReaders are responsible for decoding fields out of data into @@ -42,13 +41,6 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { return s.ZeroValue() } -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through. -func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { - parts := strings.Split(path, ".") - return addrToSchema(parts, schemaMap) -} - // addrToSchema finds the final element schema for the given address // and the given schema. It returns all the schemas that led to the final // schema. These are in order of the address (out to in). 
@@ -205,7 +197,7 @@ func readListField( // Go through each count, and get the item value out of it result := make([]interface{}, countResult.Value.(int)) - for i, _ := range result { + for i := range result { is := strconv.FormatInt(int64(i), 10) addrPadded[len(addrPadded)-1] = is rawResult, err := r.ReadField(addrPadded) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go similarity index 85% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go index 6ad3f13c..3f1f5e8a 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go @@ -7,8 +7,9 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // ConfigFieldReader reads fields out of an untyped map[string]string to the @@ -81,31 +82,16 @@ func (r *ConfigFieldReader) readField( k := strings.Join(address, ".") schema := schemaList[len(schemaList)-1] - // If we're getting the single element of a promoted list, then - // check to see if we have a single element we need to promote. - if address[len(address)-1] == "0" && len(schemaList) > 1 { - lastSchema := schemaList[len(schemaList)-2] - if lastSchema.Type == TypeList && lastSchema.PromoteSingle { - k := strings.Join(address[:len(address)-1], ".") - result, err := r.readPrimitive(k, schema) - if err == nil { - return result, nil - } - } - } - - if protoVersion5 { - switch schema.Type { - case TypeList, TypeSet, TypeMap, typeObject: - // Check if the value itself is unknown. - // The new protocol shims will add unknown values to this list of - // ComputedKeys. This is the only way we have to indicate that a - // collection is unknown in the config - for _, unknown := range r.Config.ComputedKeys { - if k == unknown { - log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) - return FieldReadResult{Computed: true, Exists: true}, nil - } + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil } } } @@ -114,17 +100,6 @@ func (r *ConfigFieldReader) readField( case TypeBool, TypeFloat, TypeInt, TypeString: return r.readPrimitive(k, schema) case TypeList: - // If we support promotion then we first check if we have a lone - // value that we must promote. - // a value that is alone. 
- if schema.PromoteSingle { - result, err := r.readPrimitive(k, schema.Elem.(*Schema)) - if err == nil && result.Exists { - result.Value = []interface{}{result.Value} - return result, nil - } - } - return readListField(&nestedConfigFieldReader{r}, address, schema) case TypeMap: return r.readMap(k, schema) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go similarity index 99% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go index 3e70acf0..642e7f32 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go @@ -4,8 +4,9 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // DiffFieldReader reads fields out of a diff structures. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go similarity index 88% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go index 53f73b71..092dd7f6 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go @@ -200,36 +200,3 @@ func (r BasicMapReader) Range(f func(string, string) bool) bool { return true } - -// MultiMapReader reads over multiple maps, preferring keys that are -// founder earlier (lower number index) vs. 
later (higher number index) -type MultiMapReader []map[string]string - -func (r MultiMapReader) Access(k string) (string, bool) { - for _, m := range r { - if v, ok := m[k]; ok { - return v, ok - } - } - - return "", false -} - -func (r MultiMapReader) Range(f func(string, string) bool) bool { - done := make(map[string]struct{}) - for _, m := range r { - for k, v := range m { - if _, ok := done[k]; ok { - continue - } - - if cont := f(k, v); !cont { - return false - } - - done[k] = struct{}{} - } - } - - return true -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go similarity index 99% rename from vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go index c09358b1..85d05be4 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go @@ -146,7 +146,7 @@ func (w *MapFieldWriter) setList( } } if err != nil { - for i, _ := range vs { + for i := range vs { is := strconv.FormatInt(int64(i), 10) setElement(is, nil) } @@ -227,7 +227,7 @@ func (w *MapFieldWriter) setObject( } } if err != nil { - for k1, _ := range v { + for k1 := range v { w.set(append(addrCopy, k1), nil) } } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go new file mode 100644 index 00000000..d9c824d2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go @@ -0,0 +1,1461 @@ +package schema + +import ( + "context" + "encoding/json" + "fmt" + "log" + "strconv" + "sync" + + "github.com/hashicorp/go-cty/cty" + ctyconvert "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/msgpack" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const newExtraKey = "_new_extra_shim" + +func NewGRPCProviderServer(p *Provider) *GRPCProviderServer { + return &GRPCProviderServer{ + provider: p, + stopCh: make(chan struct{}), + } +} + +// GRPCProviderServer handles the server, or plugin side of the rpc connection. +type GRPCProviderServer struct { + provider *Provider + stopCh chan struct{} + stopMu sync.Mutex +} + +// mergeStop is called in a goroutine and waits for the global stop signal +// and propagates cancellation to the passed in ctx/cancel func. The ctx is +// also passed to this function and waited upon so no goroutine leak is caused. +func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { + select { + case <-ctx.Done(): + return + case <-stopCh: + cancel() + } +} + +// StopContext derives a new context from the passed in grpc context. +// It creates a goroutine to wait for the server stop and propagates +// cancellation to the derived grpc context. +func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + go mergeStop(stoppable, cancel, s.stopCh) + return stoppable +} + +func (s *GRPCProviderServer) GetProviderSchema(_ context.Context, req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { + + resp := &tfprotov5.GetProviderSchemaResponse{ + ResourceSchemas: make(map[string]*tfprotov5.Schema), + DataSourceSchemas: make(map[string]*tfprotov5.Schema), + } + + resp.Provider = &tfprotov5.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), + } + + resp.ProviderMeta = &tfprotov5.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderMetaSchemaBlock()), + } + + for typ, res := range s.provider.ResourcesMap { + resp.ResourceSchemas[typ] = &tfprotov5.Schema{ + Version: int64(res.SchemaVersion), + Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + } + } + + for typ, dat := range s.provider.DataSourcesMap { + resp.DataSourceSchemas[typ] = &tfprotov5.Schema{ + Version: int64(dat.SchemaVersion), + Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + } + } + + return resp, nil +} + +func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { + return InternalMap(s.provider.Schema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getProviderMetaSchemaBlock() *configschema.Block { + return InternalMap(s.provider.ProviderMetaSchema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return res.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return dat.CoreConfigSchema() +} + +func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { + resp := &tfprotov5.PrepareProviderConfigResponse{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return resp, nil + } + + configVal, err = schemaBlock.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
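The default-filling transform above consults `Schema.DefaultValue()`, so it honours both `Default` and `DefaultFunc`. A sketch of the kind of top-level provider attributes it targets; the attribute names and environment variables are assumptions, not necessarily this provider's real schema.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

var providerSchemaSketch = map[string]*schema.Schema{
	"url": {
		Type:     schema.TypeString,
		Optional: true,
		// resolved by DefaultValue() when the configured value is null
		DefaultFunc: schema.EnvDefaultFunc("ALKS_URL", nil),
	},
	"access_key": {
		Type:        schema.TypeString,
		Optional:    true,
		Sensitive:   true,
		DefaultFunc: schema.EnvDefaultFunc("ALKS_ACCESS_KEY_ID", nil),
	},
}
```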
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.Validate(config)) + + preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &tfprotov5.DynamicValue{MsgPack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { + resp := &tfprotov5.ValidateResourceTypeConfigResponse{} + + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) { + resp := &tfprotov5.ValidateDataSourceConfigResponse{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) { + resp := &tfprotov5.UpgradeResourceStateResponse{} + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + version := int(req.Version) + + jsonMap := map[string]interface{}{} + var err error + + switch { + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. + case len(req.RawState.Flatmap) > 0: + jsonMap, version, err = s.upgradeFlatmapState(ctx, version, req.RawState.Flatmap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // if there's a JSON state, we need to decode it. 
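+	// (When the resource sets UseJSONNumber, decode with json.Number so large
+	// integers survive the round trip; the unmarshalJSON helper in json.go
+	// enables UseNumber on its decoder.)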
+ case len(req.RawState.JSON) > 0: + if res.UseJSONNumber { + err = unmarshalJSON(req.RawState.JSON, &jsonMap) + } else { + err = json.Unmarshal(req.RawState.JSON, &jsonMap) + } + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + default: + log.Println("[DEBUG] no state provided to upgrade") + return resp, nil + } + + // complete the upgrade of the JSON states + jsonMap, err = s.upgradeJSONState(ctx, version, jsonMap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // The provider isn't required to clean out removed fields + s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) + + // now we need to turn the state into the default json representation, so + // that it can be re-decoded using the actual schema. + val, err := JSONMapToStateValue(jsonMap, schemaBlock) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to make sure blocks are represented correctly, which means + // that missing blocks are empty collections, rather than null. + // First we need to CoerceValue to ensure that all object types match. + val, err = schemaBlock.CoerceValue(val) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // Normalize the value and fill in any missing blocks. + val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) + + // encode the final state to the expected msgpack format + newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.UpgradedState = &tfprotov5.DynamicValue{MsgPack: newStateMP} + return resp, nil +} + +// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate +// state if necessary, and converts it to the new JSON state format decoded as a +// map[string]interface{}. +// upgradeFlatmapState returns the json map along with the corresponding schema +// version. +func (s *GRPCProviderServer) upgradeFlatmapState(ctx context.Context, version int, m map[string]string, res *Resource) (map[string]interface{}, int, error) { + // this will be the version we've upgraded so, defaulting to the given + // version in case no migration was called. + upgradedVersion := version + + // first determine if we need to call the legacy MigrateState func + requiresMigrate := version < res.SchemaVersion + + schemaType := res.CoreConfigSchema().ImpliedType() + + // if there are any StateUpgraders, then we need to only compare + // against the first version there + if len(res.StateUpgraders) > 0 { + requiresMigrate = version < res.StateUpgraders[0].Version + } + + if requiresMigrate && res.MigrateState == nil { + // Providers were previously allowed to bump the version + // without declaring MigrateState. + // If there are further upgraders, then we've only updated that far. 
+ if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else if requiresMigrate { + is := &terraform.InstanceState{ + ID: m["id"], + Attributes: m, + Meta: map[string]interface{}{ + "schema_version": strconv.Itoa(version), + }, + } + is, err := res.MigrateState(version, is, s.provider.Meta()) + if err != nil { + return nil, 0, err + } + + // re-assign the map in case there was a copy made, making sure to keep + // the ID + m := is.Attributes + m["id"] = is.ID + + // if there are further upgraders, then we've only updated that far + if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else { + // the schema version may be newer than the MigrateState functions + // handled and older than the current, but still stored in the flatmap + // form. If that's the case, we need to find the correct schema type to + // convert the state. + for _, upgrader := range res.StateUpgraders { + if upgrader.Version == version { + schemaType = upgrader.Type + break + } + } + } + + // now we know the state is up to the latest version that handled the + // flatmap format state. Now we can upgrade the format and continue from + // there. + newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) + if err != nil { + return nil, 0, err + } + + jsonMap, err := stateValueToJSONMap(newConfigVal, schemaType, res.UseJSONNumber) + return jsonMap, upgradedVersion, err +} + +func (s *GRPCProviderServer) upgradeJSONState(ctx context.Context, version int, m map[string]interface{}, res *Resource) (map[string]interface{}, error) { + var err error + + for _, upgrader := range res.StateUpgraders { + if version != upgrader.Version { + continue + } + + m, err = upgrader.Upgrade(ctx, m, s.provider.Meta()) + if err != nil { + return nil, err + } + version++ + } + + return m, nil +} + +// Remove any attributes no longer present in the schema, so that the json can +// be correctly decoded. +func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { + // we're only concerned with finding maps that corespond to object + // attributes + switch v := v.(type) { + case []interface{}: + // If these aren't blocks the next call will be a noop + if ty.IsListType() || ty.IsSetType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(eV, eTy) + } + } + return + case map[string]interface{}: + // map blocks aren't yet supported, but handle this just in case + if ty.IsMapType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(eV, eTy) + } + return + } + + if ty == cty.DynamicPseudoType { + log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) + return + } + + if !ty.IsObjectType() { + // This shouldn't happen, and will fail to decode further on, so + // there's no need to handle it here. 
+ log.Printf("[WARN] unexpected type %#v for map in json state", ty) + return + } + + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + delete(v, attr) + continue + } + + s.removeAttributes(attrV, attrTy) + } + } +} + +func (s *GRPCProviderServer) StopProvider(_ context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + // stop + close(s.stopCh) + // reset the stop signal + s.stopCh = make(chan struct{}) + + return &tfprotov5.StopProviderResponse{}, nil +} + +func (s *GRPCProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) { + resp := &tfprotov5.ConfigureProviderResponse{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + // TODO: remove global stop context hack + // This attaches a global stop synchro'd context onto the provider.Configure + // request scoped context. This provides a substitute for the removed provider.StopContext() + // function. Ideally a provider should migrate to the context aware API that receives + // request scoped contexts, however this is a large undertaking for very large providers. + ctxHack := context.WithValue(ctx, StopContextKey, s.StopContext(context.Background())) + diags := s.provider.Configure(ctxHack, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) { + resp := &tfprotov5.ReadResourceResponse{ + // helper/schema did previously handle private data during refresh, but + // core is now going to expect this to be maintained in order to + // persist it in the state. 
+ Private: req.Private, + } + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.Private) > 0 { + if err := json.Unmarshal(req.Private, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + instanceState.Meta = private + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + instanceState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.RefreshWithoutUpgrade(ctx, instanceState, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + if diags.HasError() { + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. + newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, false) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) { + resp := &tfprotov5.PlanResourceChangeResponse{} + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. 
+ resp.UnsafeToUseLegacyTypeSystem = true + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. + if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // turn the proposed state into a legacy configuration + cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) + + diff, err := res.SimpleDiff(ctx, priorState, cfg, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // if this is a new instance, we need to make sure ID is going to be computed + if create { + if diff == nil { + diff = terraform.NewInstanceDiff() + } + + diff.Attributes["id"] = &terraform.ResourceAttrDiff{ + NewComputed: true, + } + } + + if diff == nil || len(diff.Attributes) == 0 { + // schema.Provider.Diff returns nil if it ends up making a diff with no + // changes, but our new interface wants us to return an actual change + // description that _shows_ there are no changes. This is always the + // prior state, because we force a diff above if this is a new instance. 
+ resp.PlannedState = req.PriorState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + if priorState == nil { + priorState = &terraform.InstanceState{} + } + + // now we need to apply the diff to the prior state, so get the planned state + plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) + + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) + + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) + + // The old SDK code has some imprecisions that cause it to sometimes + // generate differences that the SDK itself does not consider significant + // but Terraform Core would. To avoid producing weird do-nothing diffs + // in that case, we'll check if the provider as produced something we + // think is "equivalent" to the prior state and just return the prior state + // itself if so, thus ensuring that Terraform Core will treat this as + // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its + // accuracy. + forceNoChanges := false + if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { + plannedStateVal = priorStateVal + forceNoChanges = true + } + + // if this was creating the resource, we need to set any remaining computed + // fields + if create { + plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) + } + + plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedState = &tfprotov5.DynamicValue{ + MsgPack: plannedMP, + } + + // encode any timeouts into the diff Meta + t := &ResourceTimeout{} + if err := t.ConfigDecode(res, cfg); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if err := t.DiffEncode(diff); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to store any NewExtra values, which are where any actual + // StateFunc modified config fields are hidden. + privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths. 
+ var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". + id := plannedStateVal.GetAttr("id") + if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { + requiresNew = append(requiresNew, "id") + } + + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // convert these to the protocol structures + for _, p := range requiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) { + resp := &tfprotov5.ApplyResourceChangeResponse{ + // Start with the existing state as a fallback + NewState: req.PriorState, + } + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.PlannedPrivate) > 0 { + if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + var diff *terraform.InstanceDiff + destroy := false + + // a null state means we are destroying the instance + if plannedStateVal.IsNull() { + destroy = true + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + Destroy: true, + } + } else { + diff, err = DiffFromValues(ctx, priorStateVal, plannedStateVal, stripResourceModifiers(res)) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + if diff == nil { + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + } + } + + // add NewExtra Fields that may have been stored in the private data + if newExtra := private[newExtraKey]; newExtra != nil { + for k, v := range newExtra.(map[string]interface{}) { + d := diff.Attributes[k] + + if d == nil { + d = &terraform.ResourceAttrDiff{} + } + + d.NewExtra = v + diff.Attributes[k] = d + } + } + + if private != nil { + diff.Meta = private + } + + for k, d := range diff.Attributes { + // We need to turn off any RequiresNew. 
There could be attributes + // without changes in here inserted by helper/schema, but if they have + // RequiresNew then the state will be dropped from the ResourceData. + d.RequiresNew = false + + // Check that any "removed" attributes that don't actually exist in the + // prior state, or helper/schema will confuse itself + if d.NewRemoved { + if _, ok := priorState.Attributes[k]; !ok { + delete(diff.Attributes, k) + } + } + } + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.MsgPack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.Apply(ctx, priorState, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) + + // Always return a null value for destroy. + // While this is usually indicated by a nil state, check for missing ID or + // attributes in the case of a provider failure. + if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + return resp, nil + } + + // We keep the null val if we destroyed the resource, otherwise build the + // entire object, even if the new state was nil. + newStateVal, err = StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) + + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + + meta, err := json.Marshal(newInstanceState.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.Private = meta + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. 
+ resp.UnsafeToUseLegacyTypeSystem = true + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) { + resp := &tfprotov5.ImportResourceStateResponse{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(ctx, info, req.ID) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + schemaBlock := s.getResourceSchemaBlock(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Normalize the value and fill in any missing blocks. + newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + importedResource := &tfprotov5.ImportedResource{ + TypeName: resourceType, + State: &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) { + resp := &tfprotov5.ReadDataSourceResponse{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.MsgPack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // we need to still build the diff separately with the Read method to match + // the old behavior + res, ok := s.provider.DataSourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) + return resp, nil + } + diff, err := res.Diff(ctx, nil, config, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // now we can get the new complete data source + newInstanceState, diags := res.ReadDataApply(ctx, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + if diags.HasError() { + return resp, nil + } + + newStateVal, err := StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.State = &tfprotov5.DynamicValue{ + MsgPack: newStateMP, + } + return resp, nil +} + +func pathToAttributePath(path cty.Path) *tftypes.AttributePath { + var steps []tftypes.AttributePathStep + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, + tftypes.AttributeName(s.Name), + ) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, + tftypes.ElementKeyInt(i), + ) + case cty.String: + steps = append(steps, + tftypes.ElementKeyString(s.Key.AsString()), + ) + } + } + } + + return &tftypes.AttributePath{Steps: steps} +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. we need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. +func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `to` is null we are planning to remove it altogether. + if to.IsNull() { + return to + } + toAttrs := to.AsValueMap() + // We need to remove the key since the hcl2shims will add a non-null block + // because we can't determine if a single block was null from the flatmapped + // values. This needs to conform to the correct schema for marshaling, so + // change the value to null rather than deleting it from the object map. + timeouts, ok := toAttrs[TimeoutsConfigKey] + if ok { + toAttrs[TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) + } + + // if from is null then there are no timeouts to copy + if from.IsNull() { + return cty.ObjectVal(toAttrs) + } + + fromAttrs := from.AsValueMap() + timeouts, ok = fromAttrs[TimeoutsConfigKey] + + // timeouts shouldn't be unknown, but don't copy possibly invalid values either + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + // no timeouts block to copy + return cty.ObjectVal(toAttrs) + } + + toAttrs[TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} + +// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all +// StateFuncs and CustomizeDiffs removed. 
This will be used during apply to +// create a diff from a planned state where the diff modifications have already +// been applied. +func stripResourceModifiers(r *Resource) *Resource { + if r == nil { + return nil + } + // start with a shallow copy + newResource := new(Resource) + *newResource = *r + + newResource.CustomizeDiff = nil + newResource.Schema = map[string]*Schema{} + + for k, s := range r.Schema { + newResource.Schema[k] = stripSchema(s) + } + + return newResource +} + +func stripSchema(s *Schema) *Schema { + if s == nil { + return nil + } + // start with a shallow copy + newSchema := new(Schema) + *newSchema = *s + + newSchema.StateFunc = nil + + switch e := newSchema.Elem.(type) { + case *Schema: + newSchema.Elem = stripSchema(e) + case *Resource: + newSchema.Elem = stripResourceModifiers(e) + } + + return newSchema +} + +// Zero values and empty containers may be interchanged by the apply process. +// When there is a discrepency between src and dst value being null or empty, +// prefer the src value. This takes a little more liberty with set types, since +// we can't correlate modified set values. In the case of sets, if the src set +// was wholly known we assume the value was correctly applied and copy that +// entirely to the new value. +// While apply prefers the src value, during plan we prefer dst whenever there +// is an unknown or a set is involved, since the plan can alter the value +// however it sees fit. This however means that a CustomizeDiffFunction may not +// be able to change a null to an empty value or vice versa, but that should be +// very uncommon nor was it reliable before 0.12 either. +func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { + ty := dst.Type() + if !src.IsNull() && !src.IsKnown() { + // Return src during plan to retain unknown interpolated placeholders, + // which could be lost if we're only updating a resource. If this is a + // read scenario, then there shouldn't be any unknowns at all. + if dst.IsNull() && !apply { + return src + } + return dst + } + + // Handle null/empty changes for collections during apply. + // A change between null and empty values prefers src to make sure the state + // is consistent between plan and apply. + if ty.IsCollectionType() && apply { + dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 + srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 + + if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { + return src + } + } + + // check the invariants that we need below, to ensure we are working with + // non-null and known values. + if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal, ok := dstMap[key] + if !ok && apply && ty.IsMapType() { + // don't transfer old map values to dst during apply + continue + } + + if dstVal == cty.NilVal { + if !apply && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + + dstMap[key] = normalizeNullValues(dstVal, v, apply) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. 
+ if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && apply { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && apply { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a read or plan. + if !apply && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. + srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty == cty.String: + // The legacy SDK should not be able to remove a value during plan or + // apply, however we are only going to overwrite this if the source was + // an empty string, since that is what is often equated with unset and + // lost in the diff process. + if dst.IsNull() && src.AsString() == "" { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { + var diags []*tfprotov5.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + // if this is a set, the kv is also going to be null which + // isn't a valid path element, so we can't append it to the + // diagnostic. 
+ p := path + if !kv.IsNull() { + p = append(p, cty.IndexStep{Key: kv}) + } + + diags = append(diags, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(p), + }) + continue + } + + d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ev, append(path, step)) + diags = convert.AppendProtoDiag(diags, d) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go new file mode 100644 index 00000000..265099a6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go @@ -0,0 +1,12 @@ +package schema + +import ( + "bytes" + "encoding/json" +) + +func unmarshalJSON(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go new file mode 100644 index 00000000..25148e17 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -0,0 +1,474 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "sort" + "strings" + + "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/meta" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// StopContext returns a context safe for global use that will cancel +// when Terraform requests a stop. This function should only be called +// within a ConfigureContextFunc, passing in the request scoped context +// received in that method. +// +// Deprecated: The use of a global context is discouraged. Please use the new +// context aware CRUD methods. +func StopContext(ctx context.Context) (context.Context, bool) { + stopContext, ok := ctx.Value(StopContextKey).(context.Context) + return stopContext, ok +} + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. 
+ // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. + ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ProviderMetaSchema is the schema for the configuration of the meta + // information for this provider. If this provider has no meta info, + // this can be omitted. This functionality is currently experimental + // and subject to change or break without warning; it should only be + // used by providers that are collaborating on its use with the + // Terraform team. + ProviderMetaSchema map[string]*Schema + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // Deprecated: Please use ConfigureContextFunc instead. + ConfigureFunc ConfigureFunc + + // ConfigureContextFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. This function + // receives a context.Context that will cancel when Terraform sends a + // cancellation signal. This function can yield Diagnostics. + ConfigureContextFunc ConfigureContextFunc + + meta interface{} + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// Deprecated: Please use ConfigureContextFunc +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// ConfigureContextFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureContextFunc func(context.Context, *ResourceData) (interface{}, diag.Diagnostics) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. 
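+//
+// A minimal sketch of such a test, assuming the provider package exposes a
+// Provider() constructor (a common convention, not something this SDK
+// requires):
+//
+//	func TestProvider_InternalValidate(t *testing.T) {
+//		if err := Provider().InternalValidate(); err != nil {
+//			t.Fatalf("internal validation failed: %s", err)
+//		}
+//	}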
+func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + if p.ConfigureFunc != nil && p.ConfigureContextFunc != nil { + return errors.New("ConfigureFunc and ConfigureContextFunc must not both be set") + } + + var validationErrors error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + + // Provider-specific checks + for k := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) + } + } + + return validationErrors +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// GetSchema returns the config schema for the main provider +// configuration, as would appear in a "provider" block in the +// configuration files. +// +// Currently not all providers support schema. Callers must therefore +// first call Resources and DataSources and ensure that at least one +// resource or data source has the SchemaAvailable flag set. +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Validate is called once at the beginning with the raw configuration +// (no interpolation done) and can return diagnostics +// +// This is called once with the provider configuration only. It may not +// be called at all if no provider configuration is given. +// +// This should not assume that any values of the configurations are valid. +// The primary use case of this call is to check that required keys are +// set. +func (p *Provider) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + if err := p.InternalValidate(); err != nil { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: "InternalValidate", + Detail: fmt.Sprintf("Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. 
Please report\n"+ + "this bug:\n\n%s", err), + }, + } + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per resource. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) diag.Diagnostics { + r, ok := p.ResourcesMap[t] + if !ok { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support resource: %s", t), + }, + } + } + + return r.Validate(c) +} + +// Configure configures the provider itself with the configuration +// given. This is useful for setting things like access keys. +// +// This won't be called at all if no provider configuration is given. +func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) diag.Diagnostics { + // No configuration + if p.ConfigureFunc == nil && p.ConfigureContextFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(ctx, nil, c, nil, p.meta, true) + if err != nil { + return diag.FromErr(err) + } + + data, err := sm.Data(nil, diff) + if err != nil { + return diag.FromErr(err) + } + + if p.ConfigureFunc != nil { + meta, err := p.ConfigureFunc(data) + if err != nil { + return diag.FromErr(err) + } + p.meta = meta + } + if p.ConfigureContextFunc != nil { + meta, diags := p.ConfigureContextFunc(ctx, data) + if diags.HasError() { + return diags + } + p.meta = meta + } + + return nil +} + +// Resources returns all the available resource types that this provider +// knows how to manage. +func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +// ImportState requests that the given resource be imported. +// +// The returned InstanceState only requires ID be set. Importing +// will always call Refresh after the state to complete it. +// +// IMPORTANT: InstanceState doesn't have the resource type attached +// to it. A type must be specified on the state via the Ephemeral +// field on the state. +// +// This function can return multiple states. Normally, an import +// will map 1:1 to a physical resource. However, some resources map +// to multiple. For example, an AWS security group may contain many rules. +// Each rule is represented by a separate resource in Terraform, +// therefore multiple states are returned. 
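+//
+// A resource opts in to importing through its Importer field; a minimal
+// sketch using the passthrough helper from this package, which keeps the
+// user-supplied ID and relies on the subsequent Refresh to fill in the rest:
+//
+//	&Resource{
+//		// ...
+//		Importer: &ResourceImporter{
+//			StateContext: ImportStatePassthroughContext,
+//		},
+//	}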
+func (p *Provider) ImportState( + ctx context.Context, + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil || r.Importer.StateContext != nil { + var err error + if r.Importer.StateContext != nil { + results, err = r.Importer.StateContext(ctx, data, p.meta) + } else { + results, err = r.Importer.State(data, p.meta) + } + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to Terraform.") + } + } + + return states, nil +} + +// ValidateDataSource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per data source instance. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. +func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) diag.Diagnostics { + r, ok := p.DataSourcesMap[t] + if !ok { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support data source: %s", t), + }, + } + } + + return r.Validate(c) +} + +// DataSources returns all of the available data sources that this +// provider implements. +func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +// UserAgent returns a string suitable for use in the User-Agent header of +// requests generated by the provider. The generated string contains the +// version of Terraform, the Plugin SDK, and the provider used to generate the +// request. `name` should be the hyphen-separated reporting name of the +// provider, and `version` should be the version of the provider. +// +// If TF_APPEND_USER_AGENT is set, its value will be appended to the returned +// string. 
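+//
+// A hypothetical example of the resulting value (exact versions will differ):
+//
+//	Terraform/0.14.5 (+https://www.terraform.io) Terraform-Plugin-SDK/2.4.3 terraform-provider-alks/2.0.0
+//
+// It is typically called from the provider's ConfigureContextFunc, e.g.
+// p.UserAgent("terraform-provider-alks", "2.0.0"), and handed to the
+// underlying API client.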
+func (p *Provider) UserAgent(name, version string) string { + ua := fmt.Sprintf("Terraform/%s (+https://www.terraform.io) Terraform-Plugin-SDK/%s", p.TerraformVersion, meta.SDKVersionString()) + if name != "" { + ua += " " + name + if version != "" { + ua += "/" + version + } + } + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} + +// GRPCProvider returns a gRPC server, for use with terraform-plugin-mux. +func (p *Provider) GRPCProvider() tfprotov5.ProviderServer { + return NewGRPCProviderServer(p) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go new file mode 100644 index 00000000..2f5663d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -0,0 +1,838 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +// Resource represents a thing in Terraform that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for terraform resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. + // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. + Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion stored in the state for each + // resource. Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible to the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their Versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. 
+ // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's configured meta interface{}, in case the migration process + // needs to make any remote API calls. + // + // Deprecated: MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // The functions below are the CRUD operations for this resource. + // + // Deprecated: Please use the context aware equivalents instead. Only one of + // the operations or context aware equivalent can be set, not both. + Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. + // + // Deprecated: ReadContext should be able to encapsulate the logic of Exists + Exists ExistsFunc + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not + // implemented, then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below are used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. + // + // The interface{} parameter is the result of the ConfigureFunc in + // the provider for this resource. If the provider does not define + // a ConfigureFunc, this will be nil. This parameter should be used + // to store API clients, configuration structures, etc. + // + // These functions are passed a context configured to timeout with whatever + // was set as the timeout for this operation. Useful for forwarding on to + // backend SDK's that accept context. The context will also cancel if + // Terraform sends a cancellation signal. + // + // These functions return diagnostics, allowing developers to build + // a list of warnings and errors to be presented to the Terraform user. 
+ // The AttributePath of those diagnostics should be built within these + // functions, please consult go-cty documentation for building a cty.Path + CreateContext CreateContextFunc + ReadContext ReadContextFunc + UpdateContext UpdateContextFunc + DeleteContext DeleteContextFunc + + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + + // Importer is the ResourceImporter implementation for this resource. + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. + Importer *ResourceImporter + + // If non-empty, this string is emitted as a warning during Validate. + DeprecationMessage string + + // Timeouts allow users to specify specific time durations in which an + // operation should time out, to allow them to extend an action to suit their + // usage. For example, a user may specify a large Creation timeout for their + // AWS RDS Instance due to it's size, or restoring from a snapshot. + // Resource implementors must enable Timeout support by adding the allowed + // actions (Create, Read, Update, Delete, Default) to the Resource struct, and + // accessing them in the matching methods. + Timeouts *ResourceTimeout + + // Description is used as the description for docs, the language server and + // other user facing usage. It can be plain-text or markdown depending on the + // global DescriptionKind setting. + Description string + + // UseJSONNumber should be set when state upgraders will expect + // json.Numbers instead of float64s for numbers. This is added as a + // toggle for backwards compatibility for type assertions, but should + // be used in all new resources to avoid bugs with sufficiently large + // user input. + // + // See github.com/hashicorp/terraform-plugin-sdk/issues/655 for more + // details. + UseJSONNumber bool +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. 
+ s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + +// The following function types are of the legacy CRUD operations. +// +// Deprecated: Please use the context aware equivalents instead. +type CreateFunc func(*ResourceData, interface{}) error +type ReadFunc func(*ResourceData, interface{}) error +type UpdateFunc func(*ResourceData, interface{}) error +type DeleteFunc func(*ResourceData, interface{}) error +type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. +type CreateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type ReadContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type UpdateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type DeleteContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type StateMigrateFunc func( + int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) + +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // deocded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. + Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. 
+type CustomizeDiffFunc func(context.Context, *ResourceDiff, interface{}) error + +func (r *Resource) create(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Create != nil { + if err := r.Create(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutCreate)) + defer cancel() + return r.CreateContext(ctx, d, meta) +} + +func (r *Resource) read(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Read != nil { + if err := r.Read(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutRead)) + defer cancel() + return r.ReadContext(ctx, d, meta) +} + +func (r *Resource) update(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Update != nil { + if err := r.Update(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutUpdate)) + defer cancel() + return r.UpdateContext(ctx, d, meta) +} + +func (r *Resource) delete(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Delete != nil { + if err := r.Delete(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutDelete)) + defer cancel() + return r.DeleteContext(ctx, d, meta) +} + +// Apply creates, updates, and/or deletes a resource. +func (r *Resource) Apply( + ctx context.Context, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + data, err := schemaMap(r.Schema).Data(s, d) + if err != nil { + return s, diag.FromErr(err) + } + + if s != nil && data != nil { + data.providerMeta = s.ProviderMeta + } + + // Instance Diff shoould have the timeout info, need to copy it over to the + // ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + } else { + log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The Terraform API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(terraform.InstanceState) + } + + var diags diag.Diagnostics + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + diags = append(diags, r.delete(ctx, data, meta)...) + if diags.HasError() { + return r.recordCurrentSchemaVersion(data.State()), diags + } + + // Make sure the ID is gone. + data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, diags + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, append(diags, diag.FromErr(err)...) + } + + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + } + + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + diags = append(diags, r.create(ctx, data, meta)...) 
+ } else { + if !r.updateFuncSet() { + return s, append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "doesn't support update", + }) + } + diags = append(diags, r.update(ctx, data, meta)...) + } + + return r.recordCurrentSchemaVersion(data.State()), diags +} + +// Diff returns a diff of this resource. +func (r *Resource) Diff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(ctx, s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) SimpleDiff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(ctx, s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. + for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + diags := schemaMap(r.Schema).Validate(c) + + if r.DeprecationMessage != "" { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Resource", + Detail: r.DeprecationMessage, + }) + } + + return diags +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + ctx context.Context, + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, diag.Diagnostics) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. + data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, diag.FromErr(err) + } + + diags := r.read(ctx, data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), diags +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. 
+func (r *Resource) RefreshWithoutUpgrade( + ctx context.Context, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return s, diag.FromErr(err) + } + data.timeouts = &rt + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, diag.FromErr(err) + } + + if !exists { + return nil, nil + } + } + + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return s, diag.FromErr(err) + } + data.timeouts = &rt + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + diags := r.read(ctx, data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), diags +} + +func (r *Resource) createFuncSet() bool { + return (r.Create != nil || r.CreateContext != nil) +} + +func (r *Resource) readFuncSet() bool { + return (r.Read != nil || r.ReadContext != nil) +} + +func (r *Resource) updateFuncSet() bool { + return (r.Update != nil || r.UpdateContext != nil) +} + +func (r *Resource) deleteFuncSet() bool { + return (r.Delete != nil || r.DeleteContext != nil) +} + +// InternalValidate should be called to validate the structure +// of the resource. +// +// This should be called in a unit test for any resource to verify +// before release that a resource is properly configured for use with +// this library. +// +// Provider.InternalValidate() will automatically call this for all of +// the resources it manages, so you don't need to call this manually if it +// is part of a Provider. 
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.createFuncSet() || r.updateFuncSet() || r.deleteFuncSet() { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if !r.updateFuncSet() { + nonForceNewAttrs := make([]string, 0) + for k, v := range r.Schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range r.Schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schemaMap(r.Schema) + + // Destroy, and Read are required + if !r.readFuncSet() { + return fmt.Errorf("Read must be implemented") + } + if !r.deleteFuncSet() { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer. + if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + if f, ok := tsm["id"]; ok { + // if there is an explicit ID, validate it... + err := validateResourceID(f) + if err != nil { + return err + } + } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + // check context funcs are not set alongside their nonctx counterparts + if r.CreateContext != nil && r.Create != nil { + return fmt.Errorf("CreateContext and Create should not both be set") + } + if r.ReadContext != nil && r.Read != nil { + return fmt.Errorf("ReadContext and Read should not both be set") + } + if r.UpdateContext != nil && r.Update != nil { + return fmt.Errorf("UpdateContext and Update should not both be set") + } + if r.DeleteContext != nil && r.Delete != nil { + return fmt.Errorf("DeleteContext and Delete should not both be set") + } + + return schemaMap(r.Schema).InternalValidate(tsm) +} + 
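Editor's note, not part of the vendored upstream source: the `Resource`, `ResourceImporter`, and `ResourceTimeout` types added in this file are the surface the provider's own resources now build against. Below is a minimal, hypothetical sketch of the SDK v2 wiring (context-aware CRUD, Timeouts, passthrough import); the `resourceWidget*` names and the `name` attribute are illustrative only and do not exist in this repository.

```go
package main

import (
	"context"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// resourceWidget is a hypothetical resource showing the SDK v2 surface vendored
// above: context-aware CRUD functions, Timeouts, and a passthrough Importer.
func resourceWidget() *schema.Resource {
	return &schema.Resource{
		CreateContext: resourceWidgetCreate,
		ReadContext:   resourceWidgetRead,
		DeleteContext: resourceWidgetDelete,
		// No UpdateContext is set, so InternalValidate requires every
		// configurable attribute below to be ForceNew.
		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(5 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
		},
	}
}

func resourceWidgetCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	// ctx is already bounded by the Create timeout (see Resource.create above).
	d.SetId(d.Get("name").(string))
	return resourceWidgetRead(ctx, d, meta)
}

func resourceWidgetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	// Returning nil diagnostics with the ID still set keeps the resource in
	// state; calling d.SetId("") here would instead remove it.
	return nil
}

func resourceWidgetDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	d.SetId("")
	return nil
}
```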
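If a resource does support in-place updates (unlike the ForceNew-only sketch above), the `HasChanges` helper added in the vendored `resource_data.go` further down takes over the role of the removed `SetPartial` bookkeeping. Continuing in the same hypothetical file as the sketch above; the `description` attribute and the `updateWidget` client call are assumptions for illustration only.

```go
// resourceWidgetUpdate would be registered as UpdateContext on the Resource.
func resourceWidgetUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	// Only call the backing API when a relevant argument actually changed.
	if d.HasChanges("description") {
		// e.g. updateWidget(ctx, d.Id(), d.Get("description").(string)) -- hypothetical client call
	}
	return resourceWidgetRead(ctx, d, meta)
}
```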
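The `Schema` struct vendored later in this change (`schema.go`) adds diagnostic-based validation alongside the existing helpers. A sketch of how a single attribute might combine `EnvDefaultFunc`, `ValidateDiagFunc`, and `DiffSuppressFunc`; the `ALKS_URL` variable name and the https check are assumptions for illustration, not taken from this provider's code.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleURLSchema is a hypothetical attribute definition combining the Schema
// helpers documented in the vendored schema.go.
var exampleURLSchema = &schema.Schema{
	Type:     schema.TypeString,
	Optional: true,
	// Read the default from an environment variable; nil means no default.
	DefaultFunc: schema.EnvDefaultFunc("ALKS_URL", nil),
	// Diagnostic-based validation; per validateFunc in the vendored schema.go,
	// the SDK prefixes the attribute path onto returned diagnostics if missing.
	ValidateDiagFunc: func(v interface{}, p cty.Path) diag.Diagnostics {
		if s := v.(string); !strings.HasPrefix(s, "https://") {
			return diag.Diagnostics{{
				Severity:      diag.Error,
				Summary:       fmt.Sprintf("expected an https URL, got %q", s),
				AttributePath: p,
			}}
		}
		return nil
	},
	// Suppress diffs that differ only by letter case.
	DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
		return strings.EqualFold(old, new)
	},
}
```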
+func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func validateResourceID(s *Schema) error { + if s.Type != TypeString { + return fmt.Errorf(`the "id" attribute must be of TypeString`) + } + + if s.Required { + return fmt.Errorf(`the "id" attribute cannot be marked Required`) + } + + // ID should at least be computed. If unspecified it will be set to Computed and Optional, + // but Optional is unnecessary if undesired. + if s.Computed != true { + return fmt.Errorf(`the "id" attribute must be marked Computed`) + } + return nil +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + + return false +} + +// Data returns a ResourceData struct for this Resource. Each return value +// is a separate copy and can be safely modified differently. +// +// The data returned from this function has no actual affect on the Resource +// itself (including the state given to this function). +// +// This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { + result, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing +// +// TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.Schema, + } +} + +// Returns true if the resource is "top level" i.e. not a sub-resource. +func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.createFuncSet() || r.readFuncSet()) +} + +func (r *Resource) recordCurrentSchemaVersion( + state *terraform.InstanceState) *terraform.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// NoopContext is a convenience implementation of context aware resource function which takes +// no action and returns no error. +func NoopContext(context.Context, *ResourceData, interface{}) diag.Diagnostics { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. 
+func RemoveFromState(d *ResourceData, _ interface{}) error { + d.SetId("") + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go similarity index 80% rename from vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go index 1c390709..d12e7546 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -7,32 +7,33 @@ import ( "sync" "time" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/gocty" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // ResourceData is used to query and set the attributes of a resource. // // ResourceData is the primary argument received for CRUD operations on // a resource as well as configuration of a provider. It is a powerful -// structure that can be used to not only query data, but check for changes, -// define partial state updates, etc. +// structure that can be used to not only query data, but also check for changes // -// The most relevant methods to take a look at are Get, Set, and Partial. +// The most relevant methods to take a look at are Get and Set. type ResourceData struct { // Settable (internally) - schema map[string]*Schema - config *terraform.ResourceConfig - state *terraform.InstanceState - diff *terraform.InstanceDiff - meta map[string]interface{} - timeouts *ResourceTimeout + schema map[string]*Schema + config *terraform.ResourceConfig + state *terraform.InstanceState + diff *terraform.InstanceDiff + meta map[string]interface{} + timeouts *ResourceTimeout + providerMeta cty.Value // Don't set multiReader *MultiLevelFieldReader setWriter *MapFieldWriter newState *terraform.InstanceState partial bool - partialMap map[string]struct{} once sync.Once isNew bool @@ -49,17 +50,6 @@ type getResult struct { Schema *Schema } -// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary -// values, bypassing schema. This MUST NOT be used in normal circumstances - -// it exists only to support the remote_state data source. -// -// Deprecated: Fully define schema attributes and use Set() instead. -func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { - d.once.Do(d.init) - - d.setWriter.unsafeWriteField(key, value) -} - // Get returns the data for the given key, or nil if the key doesn't exist // in the schema. // @@ -108,16 +98,11 @@ func (d *ResourceData) GetOk(key string) (interface{}, bool) { return r.Value, exists } -// GetOkExists returns the data for a given key and whether or not the key -// has been set to a non-zero value. This is only useful for determining -// if boolean attributes have been set, if they are Optional but do not -// have a Default value. +// GetOkExists can check if TypeBool attributes that are Optional with +// no Default value have been set. // -// This is nearly the same function as GetOk, yet it does not check -// for the zero value of the attribute's type. This allows for attributes -// without a default, to fully check for a literal assignment, regardless -// of the zero-value for that type. -// This should only be used if absolutely required/needed. 
+// Deprecated: usage is discouraged due to undefined behaviors and may be +// removed in a future version of the SDK func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { r := d.getRaw(key, getSourceSet) exists := r.Exists && !r.Computed @@ -133,6 +118,39 @@ func (d *ResourceData) getRaw(key string, level getSource) getResult { return d.get(parts, level) } +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceData) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + +// HasChangesExcept returns whether any keys outside the given keys have been changed. +// +// This function only works with root attribute keys. +func (d *ResourceData) HasChangesExcept(keys ...string) bool { + for attr := range d.diff.Attributes { + rootAttr := strings.Split(attr, ".")[0] + var skipAttr bool + + for _, key := range keys { + if rootAttr == key { + skipAttr = true + break + } + } + + if !skipAttr && d.HasChange(rootAttr) { + return true + } + } + + return false +} + // HasChange returns whether or not the given key has been changed. func (d *ResourceData) HasChange(key string) bool { o, n := d.GetChange(key) @@ -147,20 +165,38 @@ func (d *ResourceData) HasChange(key string) bool { return !reflect.DeepEqual(o, n) } -// Partial turns partial state mode on/off. +// HasChangeExcept returns whether any keys outside the given key have been changed. // -// When partial state mode is enabled, then only key prefixes specified -// by SetPartial will be in the final state. This allows providers to return -// partial states for partially applied resources (when errors occur). -func (d *ResourceData) Partial(on bool) { - d.partial = on - if on { - if d.partialMap == nil { - d.partialMap = make(map[string]struct{}) +// This function only works with root attribute keys. +func (d *ResourceData) HasChangeExcept(key string) bool { + for attr := range d.diff.Attributes { + rootAttr := strings.Split(attr, ".")[0] + + if rootAttr == key { + continue + } + + if d.HasChange(rootAttr) { + return true } - } else { - d.partialMap = nil } + + return false +} + +// Partial is a legacy function that was used for capturing state of specific +// attributes if an update only partially worked. Enabling this flag without +// setting any specific keys with the now removed SetPartial has a useful side +// effect of preserving all of the resource's previous state. Although confusing, +// it has been discovered that during an update when an error is returned, the +// proposed config is set into state, even without any calls to d.Set. +// +// In practice this default behavior goes mostly unnoticed since Terraform +// refreshes between operations by default. The state situation discussed is +// subject to further investigation and potential change. Until then, this +// function has been preserved for the specific usecase. +func (d *ResourceData) Partial(on bool) { + d.partial = on } // Set sets the value for the given key. @@ -189,24 +225,16 @@ func (d *ResourceData) Set(key string, value interface{}) error { } err := d.setWriter.WriteField(strings.Split(key, "."), value) - if err != nil && d.panicOnError { - panic(err) + if err != nil { + if d.panicOnError { + panic(err) + } else { + log.Printf("[ERROR] setting state: %s", err) + } } return err } -// SetPartial adds the key to the final state output while -// in partial state mode. The key must be a root key in the schema (i.e. -// it cannot be "list.0"). 
-// -// If partial state mode is disabled, then this has no effect. Additionally, -// whenever partial state mode is toggled, the partial data is cleared. -func (d *ResourceData) SetPartial(k string) { - if d.partial { - d.partialMap[k] = struct{}{} - } -} - func (d *ResourceData) MarkNewResource() { d.isNew = true } @@ -304,7 +332,7 @@ func (d *ResourceData) State() *terraform.InstanceState { // integrity check of fields existing in the schema, allowing dynamic // keys to be created. hasDynamicAttributes := false - for k, _ := range d.schema { + for k := range d.schema { if k == "__has_dynamic_attributes" { hasDynamicAttributes = true log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) @@ -319,11 +347,7 @@ func (d *ResourceData) State() *terraform.InstanceState { source := getSourceSet if d.partial { source = getSourceState - if _, ok := d.partialMap[k]; ok { - source = getSourceSet - } } - raw := d.get([]string{k}, source) if raw.Exists && !raw.Computed { rawMap[k] = raw.Value @@ -549,3 +573,10 @@ func (d *ResourceData) get(addr []string, source getSource) getResult { Schema: schema, } } + +func (d *ResourceData) GetProviderMeta(dst interface{}) error { + if d.providerMeta.IsNull() { + return nil + } + return gocty.FromCtyValue(d.providerMeta, &dst) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go similarity index 99% rename from vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go index 47b54810..984929df 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) // newValueWriter is a minor re-implementation of MapFieldWriter to include diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go new file mode 100644 index 00000000..3b17c8e9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go @@ -0,0 +1,79 @@ +package schema + +import ( + "context" + "errors" +) + +// ResourceImporter defines how a resource is imported in Terraform. This +// can be set onto a Resource struct to make it Importable. Not all resources +// have to be importable; if a Resource doesn't have a ResourceImporter then +// it won't be importable. +// +// "Importing" in Terraform is the process of taking an already-created +// resource and bringing it under Terraform management. This can include +// updating Terraform state, generating Terraform configuration, etc. +type ResourceImporter struct { + // State is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. 
+ // + // Deprecated: State is deprecated in favor of StateContext. + // Only one of the two functions can bet set. + State StateFunc + + // StateContext is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. If this isn't specified, then + // the ID is passed straight through. This function receives a context + // that will cancel if Terraform sends a cancellation signal. + StateContext StateContextFunc +} + +// StateFunc is the function called to import a resource into the Terraform state. +// +// Deprecated: Please use the context aware equivalent StateContextFunc. +type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) + +// StateContextFunc is the function called to import a resource into the +// Terraform state. It is given a ResourceData with only ID set. This +// ID is going to be an arbitrary value given by the user and may not map +// directly to the ID format that the resource expects, so that should +// be validated. +// +// This should return a slice of ResourceData that turn into the state +// that was imported. This might be as simple as returning only the argument +// that was given to the function. In other cases (such as AWS security groups), +// an import may fan out to multiple resources and this will have to return +// multiple. +// +// To create the ResourceData structures for other resource types (if +// you have to), instantiate your resource and call the Data function. +type StateContextFunc func(context.Context, *ResourceData, interface{}) ([]*ResourceData, error) + +// InternalValidate should be called to validate the structure of this +// importer. This should be called in a unit test. +// +// Resource.InternalValidate() will automatically call this, so this doesn't +// need to be called manually. Further, Resource.InternalValidate() is +// automatically called by Provider.InternalValidate(), so you only need +// to internal validate the provider. +func (r *ResourceImporter) InternalValidate() error { + if r.State != nil && r.StateContext != nil { + return errors.New("Both State and StateContext cannot be set.") + } + return nil +} + +// ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. +// +// Deprecated: Please use the context aware ImportStatePassthroughContext instead +func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} + +// ImportStatePassthroughContext is an implementation of StateContextFunc that can be +// used to simply pass the ID directly through. This should be used only +// in the case that an ID-only refresh is possible. 
+func ImportStatePassthroughContext(ctx context.Context, d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go similarity index 98% rename from vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go index 5ad7aafc..cee0b678 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go @@ -5,9 +5,10 @@ import ( "log" "time" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go new file mode 100644 index 00000000..7146bef7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -0,0 +1,2147 @@ +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. +package schema + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// Schema is used to describe the structure of a value. +// +// Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. 
+ // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration. + // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs, the language server and + // other user facing usage. It can be plain-text or markdown depending on the + // global DescriptionKind setting. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. + InputDefault string + + // The fields below relate to diffs. + // + // If Computed is true, then the result of this value is computed + // (unless specified by config) on creation. + // + // If ForceNew is true, then a change in this resource necessitates + // the creation of a new resource. + // + // StateFunc is a function called to change the value of this before + // storing it in the state (and likewise before comparing for diffs). 
+ // The use for this is for example with large strings, you may want + // to simply store the hash of it. + Computed bool + ForceNew bool + StateFunc SchemaStateFunc + + // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially managed via its own CRUD actions on the API. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. + // + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however more than one instance would + // cause instability. + // + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however less than one instance would + // cause instability. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // The following fields are only valid for a TypeSet type. + // + // Set defines a function to determine the unique ID of an item so that + // a proper set can be built. + Set SchemaSetFunc + + // ComputedWhen is a set of queries on the configuration. Whenever any + // of these things is changed, it will require a recompute (this requires + // that Computed is set to true). + // + // NOTE: This currently does not work. + ComputedWhen []string + + // ConflictsWith is a set of schema keys that conflict with this schema. + // This will only check that they're set in the _config_. This will not + // raise an error for a malfunctioning resource that sets a conflicting + // key. + // + // ExactlyOneOf is a set of schema keys that, when set, only one of the + // keys in that list can be specified. It will error if none are + // specified as well. + // + // AtLeastOneOf is a set of schema keys that, when set, at least one of + // the keys in that list must be specified. + // + // RequiredWith is a set of schema keys that must be set simultaneously. + ConflictsWith []string + ExactlyOneOf []string + AtLeastOneOf []string + RequiredWith []string + + // When Deprecated is set, this attribute is deprecated. + // + // A deprecated field still works, but will probably stop working in near + // future. This string is the message shown to the user with instructions on + // how to address the deprecation. + Deprecated string + + // ValidateFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield warnings or + // errors based on inspection of that value. + // + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + ValidateFunc SchemaValidateFunc + + // ValidateDiagFunc allows individual fields to define arbitrary validation + // logic. 
It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield diagnostics + // based on inspection of that value. + // + // ValidateDiagFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + // + // ValidateDiagFunc is also yielded the cty.Path the SDK has built up to this + // attribute. The SDK will automatically set the AttributePath of any returned + // Diagnostics to this path. Therefore the developer does not need to set + // the AttributePath for primitive types. + // + // In the case of TypeMap to provide the most precise information, please + // set an AttributePath with the additional cty.IndexStep: + // + // AttributePath: cty.IndexStringPath("key_name") + // + // Or alternatively use the passed in path to create the absolute path: + // + // AttributePath: append(path, cty.IndexStep{Key: cty.StringVal("key_name")}) + ValidateDiagFunc SchemaValidateDiagFunc + + // Sensitive ensures that the attribute's value does not get displayed in + // logs or regular output. It should be used for passwords or other + // secret fields. Future versions of Terraform may encrypt these + // values. + Sensitive bool +} + +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. +type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine +// whether a detected diff on a schema element is "valid" or not, and +// suppress it from the plan if necessary. +// +// Return true if the diff should be suppressed, false to retain it. +type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool + +// SchemaDefaultFunc is a function called to return a default value for +// a field. +type SchemaDefaultFunc func() (interface{}, error) + +// EnvDefaultFunc is a helper function that returns the value of the +// given environment variable, if one exists, or the default value +// otherwise. +func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. 
+// +// Deprecated: please use SchemaValidateDiagFunc +type SchemaValidateFunc func(interface{}, string) ([]string, []error) + +// SchemaValidateDiagFunc is a function used to validate a single field in the +// schema and has Diagnostic support. +type SchemaValidateDiagFunc func(interface{}, cty.Path) diag.Diagnostics + +func (s *Schema) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Returns a default value for this schema by either reading Default or +// evaluating DefaultFunc. If neither of these are defined, returns nil. +func (s *Schema) DefaultValue() (interface{}, error) { + if s.Default != nil { + return s.Default, nil + } + + if s.DefaultFunc != nil { + defaultValue, err := s.DefaultFunc() + if err != nil { + return nil, fmt.Errorf("error loading default: %s", err) + } + return defaultValue, nil + } + + return nil, nil +} + +// Returns a zero value for the schema. +func (s *Schema) ZeroValue() interface{} { + // If it's a set then we'll do a bit of extra work to provide the + // right hashing function in our empty value. + if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. + // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + log.Println("[DEBUG] A computed value with the empty string as the new value and a non-empty old value was found. 
Interpreting the empty string as \"unset\" to align with legacy behavior.") + return nil + } + } + + if d.New == "" && !d.NewComputed { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +func (s *Schema) validateFunc(decoded interface{}, k string, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if s.ValidateDiagFunc != nil { + diags = s.ValidateDiagFunc(decoded, path) + for i := range diags { + if !diags[i].AttributePath.HasPrefix(path) { + diags[i].AttributePath = append(path, diags[i].AttributePath...) + } + } + } else if s.ValidateFunc != nil { + ws, es := s.ValidateFunc(decoded, k) + for _, w := range ws { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: w, + AttributePath: path, + }) + } + for _, e := range es { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: e.Error(), + AttributePath: path, + }) + } + } + + return diags +} + +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +func (m schemaMap) panicOnError() bool { + return os.Getenv("TF_ACC") != "" +} + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), + }, nil +} + +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. +func (m schemaMap) Diff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. 
+ if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(ctx, rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(ctx, rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + return m.validateObject("", m, c, cty.Path{}) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. 
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. + if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ExactlyOneOf) > 0 && v.Required { + return fmt.Errorf("%s: ExactlyOneOf cannot be set with Required", k) + } + + if len(v.AtLeastOneOf) > 0 && v.Required { + return fmt.Errorf("%s: AtLeastOneOf cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap, v, false) + if err != nil { + return fmt.Errorf("ConflictsWith: %+v", err) + } + } + + if len(v.RequiredWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.RequiredWith, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("RequiredWith: %+v", err) + } + } + + if len(v.ExactlyOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("ExactlyOneOf: %+v", err) + } + } + + if len(v.AtLeastOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("AtLeastOneOf: %+v", err) + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + case 
*Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + if v.Type == TypeMap && v.Elem != nil { + switch v.Elem.(type) { + case *Resource: + return fmt.Errorf("%s: TypeMap with Elem *Resource not supported,"+ + "use TypeList/TypeSet with Elem *Resource or TypeMap with Elem *Schema", k) + } + } + + if computedOnly { + if len(v.AtLeastOneOf) > 0 { + return fmt.Errorf("%s: AtLeastOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if len(v.ConflictsWith) > 0 { + return fmt.Errorf("%s: ConflictsWith is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.Default != nil { + return fmt.Errorf("%s: Default is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DefaultFunc != nil { + return fmt.Errorf("%s: DefaultFunc is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. "+ + "There is no config for computed-only field, nothing to compare.", k) + } + if len(v.ExactlyOneOf) > 0 { + return fmt.Errorf("%s: ExactlyOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.InputDefault != "" { + return fmt.Errorf("%s: InputDefault is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MaxItems > 0 { + return fmt.Errorf("%s: MaxItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MinItems > 0 { + return fmt.Errorf("%s: MinItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.StateFunc != nil { + return fmt.Errorf("%s: StateFunc is extraneous, "+ + "value should just be changed before setting on computed-only field", k) + } + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateDiagFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + } + + if v.ValidateFunc != nil || v.ValidateDiagFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc are not yet supported on lists or sets.", k) + } + } + + if v.ValidateFunc != nil && v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc cannot both be set", k) + } + + if v.Deprecated == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) + } + } + } + + return nil +} + +func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap, self *Schema, allowSelfReference bool) error { + for _, key := range keys { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema 
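+	// Walk each dot-separated part of the referenced key (for example a
+	// hypothetical "config_block.0.nested_attr"), descending into nested
+	// resource schemas; only the ".0." index is accepted, since references
+	// may only traverse TypeList blocks with MaxItems: 1.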
+ for idx, part := range parts { + // Skip index fields if 0 + partInt, err := strconv.Atoi(part) + + if err == nil { + if partInt != 0 { + return fmt.Errorf("%s configuration block reference (%s) can only use the .0. index for TypeList and MaxItems: 1 configuration blocks", k, key) + } + + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s references unknown attribute (%s) at part (%s)", k, key, part) + } + + subResource, ok := target.Elem.(*Resource) + + if !ok { + continue + } + + // Skip Type/MaxItems check if not the last element + if (target.Type == TypeSet || target.MaxItems != 1) && idx+1 != len(parts) { + return fmt.Errorf("%s configuration block reference (%s) can only be used with TypeList and MaxItems: 1 configuration blocks", k, key) + } + + sm = schemaMap(subResource.Schema) + } + + if target == nil { + return fmt.Errorf("%s cannot find target attribute (%s), sm: %#v", k, key, sm) + } + + if target == self && !allowSelfReference { + return fmt.Errorf("%s cannot reference self (%s)", k, key) + } + + if target.Required { + return fmt.Errorf("%s cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s cannot contain Computed(When) attribute (%s)", k, key) + } + } + + return nil +} + +func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z0-9_]+$") + return re.MatchString(name) +} + +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResoureDiff with +// minimal divergence in code. +type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string +} + +func (m schemaMap) diff( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + unsupressedDiff := new(terraform.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. + if !all { + continue + } + + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + o, n, _, computedList, customized := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. 
+ if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } else { + delete(diff.Attributes, k+".#") + } + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." + + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. 
The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. + if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".%"] = finalizedAttr + } else { + delete(diff.Attributes, k+".%") + } + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + finalizedAttr := schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[prefix+k] = finalizedAttr + } + } + for k, v := range stateMap { + finalizedAttr := schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[prefix+k] = finalizedAttr + } + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. 
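+	// (listCode returns the set's element hash codes in sorted order, so this
+	// comparison is independent of element ordering; see set.go.)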
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + finalizedAttr := countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. + codes := make([][]string, 2) + codes[0] = os.Difference(ns).listCode() + codes[1] = ns.listCode() + for _, list := range codes { + for _, code := range list { + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%s.%s", k, code, k2) + err := m.diff(subK, schema, diff, d, true) + if err != nil { + return err + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeSet). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + subK := fmt.Sprintf("%s.%s", k, code) + err := m.diff(subK, &t2, diff, d, true) + if err != nil { + return err + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + } + } + + return nil +} + +func (m schemaMap) diffString( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + var originalN interface{} + var os, ns string + o, n, _, computed, customized := d.diffChange(k) + if schema.StateFunc != nil && n != nil { + originalN = n + n = schema.StateFunc(n) + } + nraw := n + if nraw == nil && o != nil { + nraw = schema.Type.Zero() + } + if err := mapstructure.WeakDecode(o, &os); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(nraw, &ns); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + if os == ns && !all && !computed { + // They're the same value. If there old value is not blank or we + // have an ID, then return right away since we're already setup. 
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + finalizedAttr := schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + if finalizedAttr != nil { + diff.Attributes[k] = finalizedAttr + } + + return nil +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. + var err error + raw, err = schema.DefaultFunc() + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Loading Default", + Detail: err.Error(), + AttributePath: path, + }) + } + + // We're okay as long as we had a value set + ok = raw != nil + } + + err := validateExactlyOneAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "ExactlyOne", + Detail: err.Error(), + AttributePath: path, + }) + } + + err = validateAtLeastOneAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "AtLeastOne", + Detail: err.Error(), + AttributePath: path, + }) + } + + if !ok { + if schema.Required { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", k), + AttributePath: path, + }) + } + return diags + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Computed attributes cannot be set", + Detail: fmt.Sprintf("Computed attributes cannot be set, but a value was set for %q.", k), + AttributePath: path, + }) + } + + err = validateRequiredWithAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "RequiredWith", + Detail: err.Error(), + AttributePath: path, + }) + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. 
+ if !isWhollyKnown(raw) { + if schema.Deprecated != "" { + return append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Attribute is deprecated", + Detail: schema.Deprecated, + AttributePath: path, + }) + } + return diags + } + + err = validateConflictingAttributes(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "ConflictsWith", + Detail: err.Error(), + AttributePath: path, + }) + } + + return m.validateType(k, raw, schema, c, path) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func validateConflictingAttributes( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. + continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func removeDuplicates(elements []string) []string { + encountered := make(map[string]struct{}, 0) + result := []string{} + + for v := range elements { + if _, ok := encountered[elements[v]]; !ok { + encountered[elements[v]] = struct{}{} + result = append(result, elements[v]) + } + } + + return result +} + +func validateRequiredWithAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.RequiredWith) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.RequiredWith, k)) + sort.Strings(allKeys) + + for _, key := range allKeys { + if _, ok := c.Get(key); !ok { + return fmt.Errorf("%q: all of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + } + + return nil +} + +func validateExactlyOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ExactlyOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.ExactlyOneOf, k)) + sort.Strings(allKeys) + specified := make([]string, 0) + unknownVariableValueCount := 0 + for _, exactlyOneOfKey := range allKeys { + if c.IsComputed(exactlyOneOfKey) { + unknownVariableValueCount++ + continue + } + + _, ok := c.Get(exactlyOneOfKey) + if ok { + specified = append(specified, exactlyOneOfKey) + } + } + + if len(specified) == 0 && unknownVariableValueCount == 0 { + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + + if len(specified) > 1 { + return fmt.Errorf("%q: only one of `%s` can be specified, but `%s` were specified.", k, strings.Join(allKeys, ","), strings.Join(specified, ",")) + } + + return nil +} + +func validateAtLeastOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.AtLeastOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.AtLeastOneOf, k)) + sort.Strings(allKeys) + + for _, atLeastOneOfKey := range allKeys { + if _, ok := c.Get(atLeastOneOfKey); ok { + // We can ignore hcl2shim.UnknownVariable by assuming it's been set and 
additional validation elsewhere + // will uncover this if it is in fact null. + return nil + } + } + + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return diags + } + } + + // schemaMap can't validate nil + if raw == nil { + return diags + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + if rawV.Kind() != reflect.Slice { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a list", + AttributePath: path, + }) + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. + if !isWhollyKnown(raw) { + return diags + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "List longer than MaxItems", + Detail: fmt.Sprintf("Attribute supports %d item maximum, config has %d declared", schema.MaxItems, rawV.Len()), + AttributePath: path, + }) + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "List shorter than MinItems", + Detail: fmt.Sprintf("Attribute supports %d item minimum, config has %d declared", schema.MinItems, rawV.Len()), + AttributePath: path, + }) + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + p := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + diags = append(diags, m.validateObject(key, t.Schema, c, p)...) + case *Schema: + diags = append(diags, m.validateType(key, raw, t, c, p)...) + } + + } + + return diags +} + +func (m schemaMap) validateMap( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return diags + } + } + + // schemaMap can't validate nil + if raw == nil { + return diags + } + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. 
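+			// For example, a literal string assigned where a map is expected
+			// comes back unchanged from c.Get and is rejected, while an
+			// unresolved interpolation will typically differ or be marked
+			// computed and is allowed through for later validation.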
+ reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) + } + // Otherwise it's likely raw is an interpolation. + return diags + case reflect.Map: + case reflect.Slice: + default: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) + if diags.HasError() { + return diags + } + + return schema.validateFunc(mapIface, k, path) + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) + } + mapIface := v.Interface() + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) + if diags.HasError() { + return diags + } + } + + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.validateFunc(validatableMap, k, path) +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema, path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + for key, raw := range m { + valueType, err := getValueType(k, schema) + p := append(path, cty.IndexStep{Key: cty.StringVal(key)}) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return diags +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. 
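+	// For example, a hypothetical map attribute declared as
+	//	"tags": {Type: TypeMap, Elem: &Schema{Type: TypeInt}}
+	// would have every map value validated as TypeInt.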
+ if s, ok := schema.Elem.(*Schema); ok { + return s.Type, nil + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + raw, _ := c.Get(k) + + // schemaMap can't validate nil + if raw == nil { + return diags + } + + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Expected Object Type", + Detail: fmt.Sprintf("Expected object, got %s", reflect.ValueOf(raw).Kind()), + AttributePath: path, + }) + } + + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + diags = append(diags, m.validate(key, s, c, append(path, cty.GetAttrStep{Name: subK}))...) + } + + // Detect any extra/unknown keys and report those as errors. + if m, ok := raw.(map[string]interface{}); ok { + for subk := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid or unknown key", + AttributePath: append(path, cty.GetAttrStep{Name: subk}), + }) + } + } + } + + return diags +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // a nil value shouldn't happen in the old protocol, and in the new + // protocol the types have already been validated. Either way, we can't + // reflect on nil, so don't panic. + if raw == nil { + return diags + } + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. + switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a list", + AttributePath: path, + }) + case reflect.Map: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a map", + AttributePath: path, + }) + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return diags + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + case TypeInt: + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. 
+ + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("Attribute must be a whole number, got %v", raw), + AttributePath: path, + }) + } + case TypeFloat: + // Verify that we can parse this as an int + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + return append(diags, schema.validateFunc(decoded, k, path)...) +} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + switch schema.Type { + case TypeList: + diags = m.validateList(k, raw, schema, c, path) + case TypeSet: + // indexing into sets is not representable in the current protocol + // best we can do is associate the path up to this attribute. + diags = m.validateList(k, raw, schema, c, path) + log.Printf("[WARN] Truncating attribute path of %d diagnostics for TypeSet", len(diags)) + for i := range diags { + diags[i].AttributePath = path + } + case TypeMap: + diags = m.validateMap(k, raw, schema, c, path) + default: + diags = m.validatePrimitive(k, raw, schema, c, path) + } + + if schema.Deprecated != "" { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Attribute", + Detail: schema.Deprecated, + AttributePath: path, + }) + } + + return diags +} + +// Zero returns the zero value for a type. +func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return new(Set) + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go similarity index 94% rename from vendor/github.com/hashicorp/terraform/helper/schema/serialize.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go index fe6d7504..0e0e3cca 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go @@ -91,7 +91,12 @@ func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Reso sm := resource.Schema m := val.(map[string]interface{}) var keys []string - for k := range sm { + allComputed := true + for k, v := range sm { + if v.Optional || v.Required { + allComputed = false + } + keys = append(keys, k) } sort.Strings(keys) @@ -100,7 +105,7 @@ func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Reso // Skip attributes that are not user-provided. 
Computed attributes // do not contribute to the hash since their ultimate value cannot // be known at plan/diff time. - if !(innerSchema.Required || innerSchema.Optional) { + if !allComputed && !(innerSchema.Required || innerSchema.Optional) { continue } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go new file mode 100644 index 00000000..a510e60f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go @@ -0,0 +1,276 @@ +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. +func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. 
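+//
+// For example (hypothetical values):
+//
+//	a := NewSet(HashString, []interface{}{"x", "y"})
+//	b := NewSet(HashString, []interface{}{"y"})
+//	a.Difference(b).List() // -> []interface{}{"x"}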
+func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. +func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func checkSetMapEqual(m1, m2 map[string]interface{}) bool { + if (m1 == nil) != (m2 == nil) { + return false + } + if len(m1) != len(m2) { + return false + } + for k := range m1 { + v1 := m1[k] + v2, ok := m2[k] + if !ok { + return false + } + switch v1.(type) { + case map[string]interface{}: + same := checkSetMapEqual(v1.(map[string]interface{}), v2.(map[string]interface{})) + if !same { + return false + } + case *Set: + same := v1.(*Set).Equal(v2) + if !same { + return false + } + default: + same := reflect.DeepEqual(v1, v2) + if !same { + return false + } + } + } + return true +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + return checkSetMapEqual(s.m, other.m) +} + +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. +func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. 
+ if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Sort(sort.StringSlice(keys)) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go new file mode 100644 index 00000000..e1575a0f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go @@ -0,0 +1,126 @@ +package schema + +import ( + "context" + "encoding/json" + + "github.com/hashicorp/go-cty/cty" + ctyjson "github.com/hashicorp/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// DiffFromValues takes the current state and desired state as cty.Values and +// derives a terraform.InstanceDiff to give to the legacy providers. This is +// used to take the states provided by the new ApplyResourceChange method and +// convert them to a state+diff required for the legacy Apply method. +func DiffFromValues(ctx context.Context, prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { + return diffFromValues(ctx, prior, planned, res, nil) +} + +// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our +// test fixtures from the legacy tests. In the new provider protocol the diff +// only needs to be created for the apply operation, and any customizations +// have already been done. +func diffFromValues(ctx context.Context, prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { + instanceState, err := res.ShimInstanceStateFromValue(prior) + if err != nil { + return nil, err + } + + configSchema := res.CoreConfigSchema() + + cfg := terraform.NewResourceConfigShimmed(planned, configSchema) + removeConfigUnknowns(cfg.Config) + removeConfigUnknowns(cfg.Raw) + + diff, err := schemaMap(res.Schema).Diff(ctx, instanceState, cfg, cust, nil, false) + if err != nil { + return nil, err + } + + return diff, err +} + +// During apply the only unknown values are those which are to be computed by +// the resource itself. These may have been marked as unknown config values, and +// need to be removed to prevent the UnknownVariableValue from appearing the diff. +func removeConfigUnknowns(cfg map[string]interface{}) { + for k, v := range cfg { + switch v := v.(type) { + case string: + if v == hcl2shim.UnknownVariableValue { + delete(cfg, k) + } + case []interface{}: + for _, i := range v { + if m, ok := i.(map[string]interface{}); ok { + removeConfigUnknowns(m) + } + } + case map[string]interface{}: + removeConfigUnknowns(v) + } + } +} + +// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to +// get a new cty.Value state. This is used to convert the diff returned from +// the legacy provider Diff method to the state required for the new +// PlanResourceChange method. 
+func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) { + return d.ApplyToValue(base, schema) +} + +// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON +// encoding. +func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + return stateValueToJSONMap(val, ty, false) +} + +func stateValueToJSONMap(val cty.Value, ty cty.Type, useJSONNumber bool) (map[string]interface{}, error) { + js, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + var m map[string]interface{} + if useJSONNumber { + if err := unmarshalJSON(js, &m); err != nil { + return nil, err + } + } else { + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } + } + + return m, nil +} + +// JSONMapToStateValue takes a generic json map[string]interface{} and converts it +// to the specific type, ensuring that the values conform to the schema. +func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { + var val cty.Value + + js, err := json.Marshal(m) + if err != nil { + return val, err + } + + val, err = ctyjson.Unmarshal(js, block.ImpliedType()) + if err != nil { + return val, err + } + + return block.CoerceValue(val) +} + +// StateValueFromInstanceState converts a terraform.InstanceState to a +// cty.Value as described by the provided cty.Type, and maintains the resource +// ID as the "id" attribute. +func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { + return is.AttrsAsObjectValue(ty) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go new file mode 100644 index 00000000..f345f832 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go @@ -0,0 +1,29 @@ +package schema + +import ( + "context" + + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. +func TestResourceDataRaw(t testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + + c := terraform.NewResourceConfigRaw(raw) + + sm := schemaMap(schema) + diff, err := sm.Diff(context.Background(), nil, c, nil, nil, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/unknown.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/unknown.go new file mode 100644 index 00000000..c58d3648 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/unknown.go @@ -0,0 +1,132 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// SetUnknowns takes a cty.Value, and compares it to the schema setting any null +// values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. 
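+	// For example, a wholly null object whose schema includes a computed
+	// (hypothetical) "id" attribute becomes an object with "id" unknown and
+	// every other attribute null; when nothing is computed, the null value is
+	// returned unchanged.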
+ if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects. + switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. 
+ for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go diff --git a/vendor/github.com/hashicorp/terraform/addrs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/addrs/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go new file mode 100644 index 00000000..064aeda2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go @@ -0,0 +1,47 @@ +package addrs + +import ( + "fmt" +) + +// instanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// intKey and stringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type instanceKey interface { + instanceKeySigil() + String() string +} + +// NoKey represents the absense of an instanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. +var NoKey instanceKey + +// intKey is the InstanceKey representation representing integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. +type intKey int + +func (k intKey) instanceKeySigil() { +} + +func (k intKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +// stringKey is the InstanceKey representation representing string indices, as +// used when the "for_each" argument is specified with a map or object type. +type stringKey string + +func (k stringKey) instanceKeySigil() { +} + +func (k stringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now. 
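	// For illustration (hypothetical values): stringKey("web").String()
	// yields `["web"]`, and intKey(2).String() above yields `[2]`, matching
	// the index syntax of addresses such as module.app["web"].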
+ return fmt.Sprintf("[%q]", string(k)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go new file mode 100644 index 00000000..98699f46 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go @@ -0,0 +1,13 @@ +package addrs + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. +type Module []string diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go new file mode 100644 index 00000000..f31d833d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go @@ -0,0 +1,241 @@ +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. +type ModuleInstance []ModuleInstanceStep + +func parseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "A module instance address must begin with \"module.\".", + )) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "The module instance address is followed by additional invalid content.", + )) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. 
If error diagnostics are returned +// then the returned address is invalid. +func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + for _, err := range parseDiags.Errs() { + // ignore warnings, they don't matter in this case + diags = append(diags, tfdiags.FromError(err)) + } + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := parseModuleInstance(traversal) + diags = append(diags, addrDiags...) + return addr, diags +} + +func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var mi ModuleInstance + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Module address prefix must be followed by dot and then a name.", + )) + break + } + + if next != "module" { + break + } + + remain = remain[1:] + // If we have the prefix "module" then we should be followed by an + // module call name, as an attribute, and then optionally an index step + // giving the instance key. + if len(remain) == 0 { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = stringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = intKey(idxInt) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + fmt.Sprintf("Invalid module index: %s.", err), + )) + } + default: + // Should never happen, because no other types are allowed in traversal indices. + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Invalid module key: must be either a string or an integer.", + )) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. + if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. 
+// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey instanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key instanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. +// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + var buf bytes.Buffer + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go new file mode 100644 index 00000000..f56032b5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go @@ -0,0 +1,130 @@ +package addrs + +import ( + "fmt" +) + +// resource is an address for a resource block within configuration, which +// contains potentially-multiple resource instances if that configuration +// block uses "count" or "for_each". +type resource struct { + Mode resourceMode + Type string + Name string +} + +func (r resource) String() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // Should never happen, but we'll return a string here rather than + // crashing just in case it does. + return fmt.Sprintf(".%s.%s", r.Type, r.Name) + } +} + +// resourceInstance is an address for a specific instance of a resource. +// When a resource is defined in configuration with "count" or "for_each" it +// produces zero or more instances, which can be addressed using this type. +type resourceInstance struct { + Resource resource + Key instanceKey +} + +func (r resourceInstance) ContainingResource() resource { + return r.Resource +} + +func (r resourceInstance) String() string { + if r.Key == NoKey { + return r.Resource.String() + } + return r.Resource.String() + r.Key.String() +} + +// absResource is an absolute address for a resource under a given module path. 
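// For example (hypothetical address, same package assumed): the managed
// resource "aws_instance.web" inside the module instance "module.network[0]"
// is rendered by absResource.String below as
// "module.network[0].aws_instance.web", while a resource in the root module
// has no module prefix.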
+type absResource struct { + Module ModuleInstance + Resource resource +} + +// Resource returns the address of a particular resource within the receiver. +func (m ModuleInstance) Resource(mode resourceMode, typeName string, name string) absResource { + return absResource{ + Module: m, + Resource: resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + } +} + +func (r absResource) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// absResourceInstance is an absolute address for a resource instance under a +// given module path. +type absResourceInstance struct { + Module ModuleInstance + Resource resourceInstance +} + +// ResourceInstance returns the address of a particular resource instance within the receiver. +func (m ModuleInstance) ResourceInstance(mode resourceMode, typeName string, name string, key instanceKey) absResourceInstance { + return absResourceInstance{ + Module: m, + Resource: resourceInstance{ + Resource: resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + Key: key, + }, + } +} + +// ContainingResource returns the address of the resource that contains the +// receving resource instance. In other words, it discards the key portion +// of the address to produce an absResource value. +func (r absResourceInstance) ContainingResource() absResource { + return absResource{ + Module: r.Module, + Resource: r.Resource.ContainingResource(), + } +} + +func (r absResourceInstance) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// resourceMode defines which lifecycle applies to a given resource. Each +// resource lifecycle has a slightly different address format. +type resourceMode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type resourceMode + +const ( + // InvalidResourceMode is the zero value of ResourceMode and is not + // a valid resource mode. + InvalidResourceMode resourceMode = 0 + + // ManagedResourceMode indicates a managed resource, as defined by + // "resource" blocks in configuration. + ManagedResourceMode resourceMode = 'M' + + // DataResourceMode indicates a data resource, as defined by + // "data" blocks in configuration. + DataResourceMode resourceMode = 'D' +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go new file mode 100644 index 00000000..3b2e65c3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type resourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _resourceMode_name_0 = "InvalidResourceMode" + _resourceMode_name_1 = "DataResourceMode" + _resourceMode_name_2 = "ManagedResourceMode" +) + +func (i resourceMode) String() string { + switch { + case i == 0: + return _resourceMode_name_0 + case i == 68: + return _resourceMode_name_1 + case i == 77: + return _resourceMode_name_2 + default: + return "resourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go similarity index 98% rename from vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go index 41a53374..48278abe 100644 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go @@ -3,8 +3,8 @@ package configschema import ( "fmt" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" ) // CoerceValue attempts to force the given value to conform to the type diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/configs/configschema/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go similarity index 98% rename from vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go index 005da56b..51b8c5d2 100644 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go @@ -1,7 +1,7 @@ package configschema import ( - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // EmptyValue returns the "empty value" for the recieving block, which for diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go new file mode 100644 index 00000000..edc9dadc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go @@ -0,0 +1,68 @@ +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. 
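// An illustrative sketch (hypothetical schema): a block with a required
// string attribute "name" and a NestingList block type "rule" whose nested
// block has one optional string attribute "action" implies the type
//
//	cty.Object(map[string]cty.Type{
//		"name": cty.String,
//		"rule": cty.List(cty.Object(map[string]cty.Type{
//			"action": cty.String,
//		})),
//	})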
+func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + atys := make(map[string]cty.Type) + + for name, attrS := range b.Attributes { + atys[name] = attrS.Type + } + + for name, blockS := range b.BlockTypes { + if _, exists := atys[name]; exists { + panic("invalid schema, blocks and attributes cannot have the same name") + } + + childType := blockS.Block.ImpliedType() + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + atys[name] = childType + case NestingList: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, which means our type _constraint_ must be + // cty.DynamicPseudoType to allow the tuple type to be decided + // separately for each value. + if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.List(childType) + } + case NestingSet: + if childType.HasDynamicTypes() { + panic("can't use cty.DynamicPseudoType inside a block type with NestingSet") + } + atys[name] = cty.Set(childType) + case NestingMap: + // We prefer to use a map where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use an object + // instead, which means our type _constraint_ must be + // cty.DynamicPseudoType to allow the tuple type to be decided + // separately for each value. + if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.Map(childType) + } + default: + panic("invalid nesting type") + } + } + + return cty.Object(atys) +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go new file mode 100644 index 00000000..6cddb9c6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go @@ -0,0 +1,155 @@ +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// StringKind represents the format a string is in. +type StringKind int + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain StringKind = iota + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. It may be a more a matter of convention in other syntaxes, such as +// JSON. +// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. 
+ Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock + + // Description and DescriptionKind contain a user facing description of the block + // and the format of that string. + Description string + DescriptionKind StringKind + + // Deprecated indicates whether the block has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + Type cty.Type + + // Description is an English-language description of the purpose and + // usage of the attribute. A description should be concise and use only + // one or two sentences, leaving full definition to longer-form + // documentation defined elsewhere. + Description string + DescriptionKind StringKind + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool + + // Deprecated indicates whether the attribute has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// NestedBlock represents the embedding of one block within another. +type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. 
+ NestingSingle + + // NestingGroup is similar to NestingSingle in that it calls for only a + // single instance of a given block type with no labels, but it additonally + // guarantees that its result will never be null, even if the block is + // absent, and instead the nested attributes and blocks will be treated + // as absent in that case. (Any required attributes or blocks within the + // nested block are not enforced unless the block is explicitly present + // in the configuration, so they are all effectively optional when the + // block is not present.) + // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using NestingGroup instead of + // NestingSingle in that case, generated plans will show the block as + // present even when not present in configuration, thus allowing any + // default values within to be displayed to the user. + NestingGroup + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go similarity index 99% rename from vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go index bb4228d9..e620e76a 100644 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/flatmap.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go @@ -5,9 +5,8 @@ import ( "strconv" "strings" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" ) // FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go new file mode 100644 index 00000000..e557845a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go @@ -0,0 +1,276 @@ +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from a +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. 
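// For example (hypothetical attribute names): with an object type whose
// "instances" attribute is a list of objects, the flatmap key
// "instances.0.name" maps to the path instances[0].name; the "#" and "%"
// count segments are not turned into path steps, and a key that descends
// into a set collapses to the path of the set attribute itself.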
+func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. + for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrSet, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. +// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff. +func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %s", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := 
pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/configs/hcl2shim/single_attr_body.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go new file mode 100644 index 00000000..91e91547 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go @@ -0,0 +1,230 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. 
+// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. +func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. 
+// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. + const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. 
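		// All values the old HCL/HIL pipeline can produce (bool, string, int,
		// float64, []interface{}, map[string]interface{}) are handled above;
		// e.g. (hypothetical) map[string]interface{}{"port": 8080} becomes a
		// cty.ObjectVal whose "port" attribute is cty.NumberIntVal(8080).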
+ panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go similarity index 99% rename from vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go index 92f0213d..87638b4e 100644 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values_equiv.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go @@ -1,7 +1,7 @@ package hcl2shim import ( - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" ) // ValuesSDKEquivalent returns true if both of the given values seem equivalent diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go similarity index 96% rename from vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go index c23f44da..d6aabbe3 100644 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go @@ -1,13 +1,14 @@ package objchange import ( - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" ) // NormalizeObjectFromLegacySDK takes an object that may have been generated // by the legacy Terraform SDK (i.e. returned from a provider with the -// LegacyTypeSystem opt-out set) and does its best to normalize it for the +// UnsafeToUseLegacyTypeSystem opt-out set) and does its best to normalize it for the // assumptions we would normally enforce if the provider had not opted out. // // In particular, this function guarantees that a value representing a nested diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go new file mode 100644 index 00000000..ac66eb71 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go @@ -0,0 +1,132 @@ +package convert + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" +) + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. 
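// For example: appending a plain error yields a diagnostic with severity
// tfprotov5.DiagnosticSeverityError, appending a string such as
// (hypothetical) "field is deprecated" yields a warning-severity diagnostic,
// and a cty.PathError additionally records its path in the Attribute field.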
+func AppendProtoDiag(diags []*tfprotov5.Diagnostic, d interface{}) []*tfprotov5.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diags = append(diags, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: d.Error(), + Attribute: ap, + }) + case diag.Diagnostics: + diags = append(diags, DiagsToProto(d)...) + case error: + diags = append(diags, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: d.Error(), + }) + case string: + diags = append(diags, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityWarning, + Summary: d, + }) + case *tfprotov5.Diagnostic: + diags = append(diags, d) + case []*tfprotov5.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiags converts a list of tfprotov5.Diagnostics to a diag.Diagnostics. +func ProtoToDiags(ds []*tfprotov5.Diagnostic) diag.Diagnostics { + var diags diag.Diagnostics + for _, d := range ds { + var severity diag.Severity + + switch d.Severity { + case tfprotov5.DiagnosticSeverityError: + severity = diag.Error + case tfprotov5.DiagnosticSeverityWarning: + severity = diag.Warning + } + + diags = append(diags, diag.Diagnostic{ + Severity: severity, + Summary: d.Summary, + Detail: d.Detail, + AttributePath: AttributePathToPath(d.Attribute), + }) + } + + return diags +} + +func DiagsToProto(diags diag.Diagnostics) []*tfprotov5.Diagnostic { + var ds []*tfprotov5.Diagnostic + for _, d := range diags { + if err := d.Validate(); err != nil { + panic(fmt.Errorf("Invalid diagnostic: %s. This is always a bug in the provider implementation", err)) + } + protoDiag := &tfprotov5.Diagnostic{ + Summary: d.Summary, + Detail: d.Detail, + Attribute: PathToAttributePath(d.AttributePath), + } + if d.Severity == diag.Error { + protoDiag.Severity = tfprotov5.DiagnosticSeverityError + } else if d.Severity == diag.Warning { + protoDiag.Severity = tfprotov5.DiagnosticSeverityWarning + } + ds = append(ds, protoDiag) + } + return ds +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *tftypes.AttributePath) cty.Path { + var p cty.Path + if ap == nil { + return p + } + for _, step := range ap.Steps { + switch step.(type) { + case tftypes.AttributeName: + p = p.GetAttr(string(step.(tftypes.AttributeName))) + case tftypes.ElementKeyString: + p = p.Index(cty.StringVal(string(step.(tftypes.ElementKeyString)))) + case tftypes.ElementKeyInt: + p = p.Index(cty.NumberIntVal(int64(step.(tftypes.ElementKeyInt)))) + } + } + return p +} + +// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path. +func PathToAttributePath(p cty.Path) *tftypes.AttributePath { + ap := &tftypes.AttributePath{} + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap.Steps = append(ap.Steps, tftypes.AttributeName(selector.Name)) + + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap.Steps = append(ap.Steps, tftypes.ElementKeyString(key.AsString())) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap.Steps = append(ap.Steps, tftypes.ElementKeyInt(v)) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. 
+ return ap + } + } + } + return ap +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go new file mode 100644 index 00000000..cdcdcfa1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go @@ -0,0 +1,300 @@ +package convert + +import ( + "fmt" + "log" + "reflect" + "sort" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +func tftypeFromCtyType(in cty.Type) (tftypes.Type, error) { + switch { + case in.Equals(cty.String): + return tftypes.String, nil + case in.Equals(cty.Number): + return tftypes.Number, nil + case in.Equals(cty.Bool): + return tftypes.Bool, nil + case in.Equals(cty.DynamicPseudoType): + return tftypes.DynamicPseudoType, nil + case in.IsSetType(): + elemType, err := tftypeFromCtyType(in.ElementType()) + if err != nil { + return nil, err + } + return tftypes.Set{ + ElementType: elemType, + }, nil + case in.IsListType(): + elemType, err := tftypeFromCtyType(in.ElementType()) + if err != nil { + return nil, err + } + return tftypes.List{ + ElementType: elemType, + }, nil + case in.IsTupleType(): + elemTypes := make([]tftypes.Type, 0, in.Length()) + for _, typ := range in.TupleElementTypes() { + elemType, err := tftypeFromCtyType(typ) + if err != nil { + return nil, err + } + elemTypes = append(elemTypes, elemType) + } + return tftypes.Tuple{ + ElementTypes: elemTypes, + }, nil + case in.IsMapType(): + elemType, err := tftypeFromCtyType(in.ElementType()) + if err != nil { + return nil, err + } + return tftypes.Map{ + AttributeType: elemType, + }, nil + case in.IsObjectType(): + attrTypes := make(map[string]tftypes.Type) + for key, typ := range in.AttributeTypes() { + attrType, err := tftypeFromCtyType(typ) + if err != nil { + return nil, err + } + attrTypes[key] = attrType + } + return tftypes.Object{ + AttributeTypes: attrTypes, + }, nil + } + return nil, fmt.Errorf("unknown cty type %s", in.GoString()) +} + +func ctyTypeFromTFType(in tftypes.Type) (cty.Type, error) { + switch { + case in.Is(tftypes.String): + return cty.String, nil + case in.Is(tftypes.Bool): + return cty.Bool, nil + case in.Is(tftypes.Number): + return cty.Number, nil + case in.Is(tftypes.DynamicPseudoType): + return cty.DynamicPseudoType, nil + case in.Is(tftypes.List{}): + elemType, err := ctyTypeFromTFType(in.(tftypes.List).ElementType) + if err != nil { + return cty.Type{}, err + } + return cty.List(elemType), nil + case in.Is(tftypes.Set{}): + elemType, err := ctyTypeFromTFType(in.(tftypes.Set).ElementType) + if err != nil { + return cty.Type{}, err + } + return cty.Set(elemType), nil + case in.Is(tftypes.Map{}): + elemType, err := ctyTypeFromTFType(in.(tftypes.Map).AttributeType) + if err != nil { + return cty.Type{}, err + } + return cty.Map(elemType), nil + case in.Is(tftypes.Tuple{}): + elemTypes := make([]cty.Type, 0, len(in.(tftypes.Tuple).ElementTypes)) + for _, typ := range in.(tftypes.Tuple).ElementTypes { + elemType, err := ctyTypeFromTFType(typ) + if err != nil { + return cty.Type{}, err + } + elemTypes = append(elemTypes, elemType) + } + return cty.Tuple(elemTypes), nil + case in.Is(tftypes.Object{}): + attrTypes := make(map[string]cty.Type, len(in.(tftypes.Object).AttributeTypes)) + for k, v := range 
in.(tftypes.Object).AttributeTypes { + attrType, err := ctyTypeFromTFType(v) + if err != nil { + return cty.Type{}, err + } + attrTypes[k] = attrType + } + return cty.Object(attrTypes), nil + } + return cty.Type{}, fmt.Errorf("unknown tftypes.Type %s", in) +} + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// tfprotov5.SchemaBlock for a grpc response. +func ConfigSchemaToProto(b *configschema.Block) *tfprotov5.SchemaBlock { + block := &tfprotov5.SchemaBlock{ + Description: b.Description, + DescriptionKind: protoStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &tfprotov5.SchemaAttribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + var err error + attr.Type, err = tftypeFromCtyType(a.Type) + if err != nil { + panic(err) + } + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoStringKind(k configschema.StringKind) tfprotov5.StringKind { + switch k { + default: + log.Printf("[TRACE] unexpected configschema.StringKind: %d", k) + return tfprotov5.StringKindPlain + case configschema.StringPlain: + return tfprotov5.StringKindPlain + case configschema.StringMarkdown: + return tfprotov5.StringKindMarkdown + } +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *tfprotov5.SchemaNestedBlock { + var nesting tfprotov5.SchemaNestedBlockNestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = tfprotov5.SchemaNestedBlockNestingModeSingle + case configschema.NestingGroup: + nesting = tfprotov5.SchemaNestedBlockNestingModeGroup + case configschema.NestingList: + nesting = tfprotov5.SchemaNestedBlockNestingModeList + case configschema.NestingSet: + nesting = tfprotov5.SchemaNestedBlockNestingModeSet + case configschema.NestingMap: + nesting = tfprotov5.SchemaNestedBlockNestingModeMap + default: + nesting = tfprotov5.SchemaNestedBlockNestingModeInvalid + } + return &tfprotov5.SchemaNestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToConfigSchema takes the GetSchema_Block from a grpc response and converts it +// to a terraform *configschema.Block. 
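// ProtoToConfigSchema is effectively the inverse of ConfigSchemaToProto
// above: for example, a tfprotov5.SchemaAttribute with Type tftypes.String
// and Optional set converts back to a configschema.Attribute with Type
// cty.String and Optional true, and nesting modes map back to their
// configschema equivalents.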
+func ProtoToConfigSchema(b *tfprotov5.SchemaBlock) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + var err error + attr.Type, err = ctyTypeFromTFType(a.Type) + if err != nil { + panic(err) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaStringKind(k tfprotov5.StringKind) configschema.StringKind { + switch k { + default: + log.Printf("[TRACE] unexpected tfprotov5.StringKind: %d", k) + return configschema.StringPlain + case tfprotov5.StringKindPlain: + return configschema.StringPlain + case tfprotov5.StringKindMarkdown: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(b *tfprotov5.SchemaNestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case tfprotov5.SchemaNestedBlockNestingModeSingle: + nesting = configschema.NestingSingle + case tfprotov5.SchemaNestedBlockNestingModeGroup: + nesting = configschema.NestingGroup + case tfprotov5.SchemaNestedBlockNestingModeList: + nesting = configschema.NestingList + case tfprotov5.SchemaNestedBlockNestingModeMap: + nesting = configschema.NestingMap + case tfprotov5.SchemaNestedBlockNestingModeSet: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. This panics if map keys +// are not a string. +func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go new file mode 100644 index 00000000..88ae7eea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/config.go @@ -0,0 +1,54 @@ +package plugintest + +import ( + "context" + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/terraform-exec/tfinstall" +) + +// Config is used to configure the test helper. In most normal test programs +// the configuration is discovered automatically by an Init* function using +// DiscoverConfig, but this is exposed so that more complex scenarios can be +// implemented by direct configuration. 
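// As DiscoverConfig below shows, discovery is driven by environment
// variables: TF_ACC_TERRAFORM_PATH pins an exact Terraform binary,
// TF_ACC_TERRAFORM_VERSION installs a specific version, and otherwise the
// helper looks up terraform on PATH or fetches the latest release into a
// temporary directory (created under TF_ACC_TEMP_DIR when that is set).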
+type Config struct {
+	SourceDir          string
+	TerraformExec      string
+	execTempDir        string
+	PreviousPluginExec string
+}
+
+// DiscoverConfig uses environment variables and other means to automatically
+// discover a reasonable test helper configuration.
+func DiscoverConfig(sourceDir string) (*Config, error) {
+	tfVersion := os.Getenv("TF_ACC_TERRAFORM_VERSION")
+	tfPath := os.Getenv("TF_ACC_TERRAFORM_PATH")
+
+	tempDir := os.Getenv("TF_ACC_TEMP_DIR")
+	tfDir, err := ioutil.TempDir(tempDir, "plugintest-terraform")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create temp dir: %w", err)
+	}
+
+	finders := []tfinstall.ExecPathFinder{}
+	switch {
+	case tfPath != "":
+		finders = append(finders, tfinstall.ExactPath(tfPath))
+	case tfVersion != "":
+		finders = append(finders, tfinstall.ExactVersion(tfVersion, tfDir))
+	default:
+		finders = append(finders, tfinstall.LookPath(), tfinstall.LatestVersion(tfDir, true))
+	}
+	tfExec, err := tfinstall.Find(context.Background(), finders...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Config{
+		SourceDir:     sourceDir,
+		TerraformExec: tfExec,
+		execTempDir:   tfDir,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go
new file mode 100644
index 00000000..3f84c6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/doc.go
@@ -0,0 +1,7 @@
+// Package plugintest contains utilities to help with writing tests for
+// Terraform plugins.
+//
+// This is not a package for testing configurations or modules written in the
+// Terraform language. It is for testing the plugins that allow Terraform to
+// manage various cloud services and other APIs.
+package plugintest
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go
new file mode 100644
index 00000000..56eaa0c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/guard.go
@@ -0,0 +1,94 @@
+package plugintest
+
+import (
+	"fmt"
+	"os"
+	"testing"
+)
+
+// AcceptanceTest is a test guard that will produce a log and call SkipNow on
+// the given TestControl if the environment variable TF_ACC isn't set to
+// indicate that the caller wants to run acceptance tests.
+//
+// Call this immediately at the start of each acceptance test function to
+// signal that it may cost money and thus requires this opt-in environment
+// variable.
+//
+// For the purpose of this function, an "acceptance test" is any test that
+// reaches out to services that are not directly controlled by the test program
+// itself, particularly if those requests may lead to service charges. For any
+// system where it is possible and realistic to run a local instance of the
+// service for testing (e.g. in a daemon launched by the test program itself),
+// prefer to do this and _don't_ call AcceptanceTest, thus allowing tests to be
+// run more easily and without external cost by contributors.
+func AcceptanceTest(t TestControl) {
+	t.Helper()
+	if os.Getenv("TF_ACC") == "" {
+		t.Log("TF_ACC is not set")
+		t.SkipNow()
+	}
+}
+
+// LongTest is a test guard that will produce a log and call SkipNow on the
+// given TestControl if the test harness is currently running in "short mode".
+// +// What is considered a "long test" will always be pretty subjective, but test +// implementers should think of this in terms of what seems like it'd be +// inconvenient to run repeatedly for quick feedback while testing a new feature +// under development. +// +// When testing resource types that always take several minutes to complete +// operations, consider having a single general test that covers the basic +// functionality and then mark any other more specific tests as long tests so +// that developers can quickly smoke-test a particular feature when needed +// but can still run the full set of tests for a feature when needed. +func LongTest(t TestControl) { + t.Helper() + if testing.Short() { + t.Log("skipping long test because of short mode") + t.SkipNow() + } +} + +// TestControl is an interface requiring a subset of *testing.T which is used +// by the test guards and helpers in this package. Most callers can simply +// pass their *testing.T value here, but the interface allows other +// implementations to potentially be provided instead, for example to allow +// meta-testing (testing of the test utilities themselves). +// +// This interface also describes the subset of normal test functionality the +// guards and helpers can perform: they can only create log lines, fail tests, +// and skip tests. All other test control is the responsibility of the main +// test code. +type TestControl interface { + Helper() + Log(args ...interface{}) + FailNow() + SkipNow() +} + +// testingT wraps a TestControl to recover some of the convenience behaviors +// that would normally come from a real *testing.T, so we can keep TestControl +// small while still having these conveniences. This is an abstraction +// inversion, but accepted because it makes the public API more convenient +// without any considerable disadvantage. +type testingT struct { + TestControl +} + +func (t testingT) Logf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) +} + +func (t testingT) Fatalf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) + t.FailNow() +} + +func (t testingT) Skipf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) + t.SkipNow() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go new file mode 100644 index 00000000..4b130b49 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go @@ -0,0 +1,151 @@ +package plugintest + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/terraform-exec/tfexec" +) + +const subprocessCurrentSigil = "4acd63807899403ca4859f5bb948d2c6" +const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" + +// AutoInitProviderHelper is the main entrypoint for testing provider plugins +// using this package. It is intended to be called during TestMain to prepare +// for provider testing. +// +// AutoInitProviderHelper will discover the location of a current Terraform CLI +// executable to test against, detect whether a prior version of the plugin is +// available for upgrade tests, and then will return an object containing the +// results of that initialization which can then be stored in a global variable +// for use in other tests. 
+func AutoInitProviderHelper(sourceDir string) *Helper { + helper, err := AutoInitHelper(sourceDir) + if err != nil { + fmt.Fprintf(os.Stderr, "cannot run Terraform provider tests: %s\n", err) + os.Exit(1) + } + return helper +} + +// Helper is intended as a per-package singleton created in TestMain which +// other tests in a package can use to create Terraform execution contexts +type Helper struct { + baseDir string + + // sourceDir is the dir containing the provider source code, needed + // for tests that use fixture files. + sourceDir string + terraformExec string + + // execTempDir is created during DiscoverConfig to store any downloaded + // binaries + execTempDir string +} + +// AutoInitHelper uses the auto-discovery behavior of DiscoverConfig to prepare +// a configuration and then calls InitHelper with it. This is a convenient +// way to get the standard init behavior based on environment variables, and +// callers should use this unless they have an unusual requirement that calls +// for constructing a config in a different way. +func AutoInitHelper(sourceDir string) (*Helper, error) { + config, err := DiscoverConfig(sourceDir) + if err != nil { + return nil, err + } + + return InitHelper(config) +} + +// InitHelper prepares a testing helper with the given configuration. +// +// For most callers it is sufficient to call AutoInitHelper instead, which +// will construct a configuration automatically based on certain environment +// variables. +// +// If this function returns an error then it may have left some temporary files +// behind in the system's temporary directory. There is currently no way to +// automatically clean those up. +func InitHelper(config *Config) (*Helper, error) { + tempDir := os.Getenv("TF_ACC_TEMP_DIR") + baseDir, err := ioutil.TempDir(tempDir, "plugintest") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) + } + + return &Helper{ + baseDir: baseDir, + sourceDir: config.SourceDir, + terraformExec: config.TerraformExec, + execTempDir: config.execTempDir, + }, nil +} + +// Close cleans up temporary files and directories created to support this +// helper, returning an error if any of the cleanup fails. +// +// Call this before returning from TestMain to minimize the amount of detritus +// left behind in the filesystem after the tests complete. +func (h *Helper) Close() error { + if h.execTempDir != "" { + err := os.RemoveAll(h.execTempDir) + if err != nil { + return err + } + } + return os.RemoveAll(h.baseDir) +} + +// NewWorkingDir creates a new working directory for use in the implementation +// of a single test, returning a WorkingDir object representing that directory. +// +// If the working directory object is not itself closed by the time the test +// program exits, the Close method on the helper itself will attempt to +// delete it. +func (h *Helper) NewWorkingDir() (*WorkingDir, error) { + dir, err := ioutil.TempDir(h.baseDir, "work") + if err != nil { + return nil, err + } + + // symlink the provider source files into the config directory + // e.g. 
testdata + err = symlinkDirectoriesOnly(h.sourceDir, dir) + if err != nil { + return nil, err + } + + tf, err := tfexec.NewTerraform(dir, h.terraformExec) + if err != nil { + return nil, err + } + + return &WorkingDir{ + h: h, + tf: tf, + baseDir: dir, + terraformExec: h.terraformExec, + }, nil +} + +// RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl +// object and will immediately fail the running test if the creation of the +// working directory fails. +func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir { + t.Helper() + + wd, err := h.NewWorkingDir() + if err != nil { + t := testingT{t} + t.Fatalf("failed to create new working directory: %s", err) + return nil + } + return wd +} + +// TerraformExecPath returns the location of the Terraform CLI executable that +// should be used when running tests. +func (h *Helper) TerraformExecPath() string { + return h.terraformExec +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go new file mode 100644 index 00000000..df1cb92f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/util.go @@ -0,0 +1,95 @@ +package plugintest + +import ( + "os" + "path/filepath" +) + +func symlinkFile(src string, dest string) (err error) { + err = os.Symlink(src, dest) + if err == nil { + srcInfo, err := os.Stat(src) + if err != nil { + err = os.Chmod(dest, srcInfo.Mode()) + } + } + + return +} + +// symlinkDir is a simplistic function for recursively symlinking all files in a directory to a new path. +// It is intended only for limited internal use and does not cover all edge cases. +func symlinkDir(srcDir string, destDir string) (err error) { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return err + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return err + } + + directory, _ := os.Open(srcDir) + defer directory.Close() + objects, err := directory.Readdir(-1) + + for _, obj := range objects { + srcPath := filepath.Join(srcDir, obj.Name()) + destPath := filepath.Join(destDir, obj.Name()) + + if obj.IsDir() { + err = symlinkDir(srcPath, destPath) + if err != nil { + return err + } + } else { + err = symlinkFile(srcPath, destPath) + if err != nil { + return err + } + } + + } + return +} + +// symlinkDirectoriesOnly finds only the first-level child directories in srcDir +// and symlinks them into destDir. +// Unlike symlinkDir, this is done non-recursively in order to limit the number +// of file descriptors used. 
+func symlinkDirectoriesOnly(srcDir string, destDir string) (err error) { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return err + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return err + } + + directory, err := os.Open(srcDir) + if err != nil { + return err + } + defer directory.Close() + objects, err := directory.Readdir(-1) + if err != nil { + return err + } + + for _, obj := range objects { + srcPath := filepath.Join(srcDir, obj.Name()) + destPath := filepath.Join(destDir, obj.Name()) + + if obj.IsDir() { + err = symlinkFile(srcPath, destPath) + if err != nil { + return err + } + } + + } + return +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go new file mode 100644 index 00000000..1faeb5c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -0,0 +1,252 @@ +package plugintest + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/hashicorp/terraform-exec/tfexec" + tfjson "github.com/hashicorp/terraform-json" +) + +const ( + ConfigFileName = "terraform_plugin_test.tf" + PlanFileName = "tfplan" +) + +// WorkingDir represents a distinct working directory that can be used for +// running tests. Each test should construct its own WorkingDir by calling +// NewWorkingDir or RequireNewWorkingDir on its package's singleton +// plugintest.Helper. +type WorkingDir struct { + h *Helper + + // baseDir is the root of the working directory tree + baseDir string + + // baseArgs is arguments that should be appended to all commands + baseArgs []string + + // tf is the instance of tfexec.Terraform used for running Terraform commands + tf *tfexec.Terraform + + // terraformExec is a path to a terraform binary, inherited from Helper + terraformExec string + + // reattachInfo stores the gRPC socket info required for Terraform's + // plugin reattach functionality + reattachInfo tfexec.ReattachInfo + + env map[string]string +} + +// Close deletes the directories and files created to represent the receiving +// working directory. After this method is called, the working directory object +// is invalid and may no longer be used. +func (wd *WorkingDir) Close() error { + return os.RemoveAll(wd.baseDir) +} + +// Setenv sets an environment variable on the WorkingDir. +func (wd *WorkingDir) Setenv(envVar, val string) { + if wd.env == nil { + wd.env = map[string]string{} + } + wd.env[envVar] = val +} + +// Unsetenv removes an environment variable from the WorkingDir. +func (wd *WorkingDir) Unsetenv(envVar string) { + delete(wd.env, envVar) +} + +func (wd *WorkingDir) SetReattachInfo(reattachInfo tfexec.ReattachInfo) { + wd.reattachInfo = reattachInfo +} + +func (wd *WorkingDir) UnsetReattachInfo() { + wd.reattachInfo = nil +} + +// GetHelper returns the Helper set on the WorkingDir. +func (wd *WorkingDir) GetHelper() *Helper { + return wd.h +} + +// SetConfig sets a new configuration for the working directory. +// +// This must be called at least once before any call to Init, Plan, Apply, or +// Destroy to establish the configuration. Any previously-set configuration is +// discarded and any saved plan is cleared. 
+func (wd *WorkingDir) SetConfig(cfg string) error { + configFilename := filepath.Join(wd.baseDir, ConfigFileName) + err := ioutil.WriteFile(configFilename, []byte(cfg), 0700) + if err != nil { + return err + } + + var mismatch *tfexec.ErrVersionMismatch + err = wd.tf.SetDisablePluginTLS(true) + if err != nil && !errors.As(err, &mismatch) { + return err + } + err = wd.tf.SetSkipProviderVerify(true) + if err != nil && !errors.As(err, &mismatch) { + return err + } + + if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { + wd.tf.SetLogPath(p) + } + + // Changing configuration invalidates any saved plan. + err = wd.ClearPlan() + if err != nil { + return err + } + return nil +} + +// ClearState deletes any Terraform state present in the working directory. +// +// Any remote objects tracked by the state are not destroyed first, so this +// will leave them dangling in the remote system. +func (wd *WorkingDir) ClearState() error { + err := os.Remove(filepath.Join(wd.baseDir, "terraform.tfstate")) + if os.IsNotExist(err) { + return nil + } + return err +} + +// ClearPlan deletes any saved plan present in the working directory. +func (wd *WorkingDir) ClearPlan() error { + err := os.Remove(wd.planFilename()) + if os.IsNotExist(err) { + return nil + } + return err +} + +// Init runs "terraform init" for the given working directory, forcing Terraform +// to use the current version of the plugin under test. +func (wd *WorkingDir) Init() error { + if _, err := os.Stat(wd.configFilename()); err != nil { + return fmt.Errorf("must call SetConfig before Init") + } + + return wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo)) +} + +func (wd *WorkingDir) configFilename() string { + return filepath.Join(wd.baseDir, ConfigFileName) +} + +func (wd *WorkingDir) planFilename() string { + return filepath.Join(wd.baseDir, PlanFileName) +} + +// CreatePlan runs "terraform plan" to create a saved plan file, which if successful +// will then be used for the next call to Apply. +func (wd *WorkingDir) CreatePlan() error { + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName)) + return err +} + +// CreateDestroyPlan runs "terraform plan -destroy" to create a saved plan +// file, which if successful will then be used for the next call to Apply. +func (wd *WorkingDir) CreateDestroyPlan() error { + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName), tfexec.Destroy(true)) + return err +} + +// Apply runs "terraform apply". If CreatePlan has previously completed +// successfully and the saved plan has not been cleared in the meantime then +// this will apply the saved plan. Otherwise, it will implicitly create a new +// plan and apply it. +func (wd *WorkingDir) Apply() error { + args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} + if wd.HasSavedPlan() { + args = append(args, tfexec.DirOrPlan(PlanFileName)) + } + + return wd.tf.Apply(context.Background(), args...) +} + +// Destroy runs "terraform destroy". It does not consider or modify any saved +// plan, and is primarily for cleaning up at the end of a test run. +// +// If destroy fails then remote objects might still exist, and continue to +// exist after a particular test is concluded. 
+func (wd *WorkingDir) Destroy() error { + return wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)) +} + +// HasSavedPlan returns true if there is a saved plan in the working directory. If +// so, a subsequent call to Apply will apply that saved plan. +func (wd *WorkingDir) HasSavedPlan() bool { + _, err := os.Stat(wd.planFilename()) + return err == nil +} + +// SavedPlan returns an object describing the current saved plan file, if any. +// +// If no plan is saved or if the plan file cannot be read, SavedPlan returns +// an error. +func (wd *WorkingDir) SavedPlan() (*tfjson.Plan, error) { + if !wd.HasSavedPlan() { + return nil, fmt.Errorf("there is no current saved plan") + } + + return wd.tf.ShowPlanFile(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) +} + +// SavedPlanRawStdout returns a human readable stdout capture of the current saved plan file, if any. +// +// If no plan is saved or if the plan file cannot be read, SavedPlanRawStdout returns +// an error. +func (wd *WorkingDir) SavedPlanRawStdout() (string, error) { + if !wd.HasSavedPlan() { + return "", fmt.Errorf("there is no current saved plan") + } + + var ret bytes.Buffer + + wd.tf.SetStdout(&ret) + defer wd.tf.SetStdout(ioutil.Discard) + _, err := wd.tf.ShowPlanFileRaw(context.Background(), wd.planFilename(), tfexec.Reattach(wd.reattachInfo)) + if err != nil { + return "", err + } + + return ret.String(), nil +} + +// State returns an object describing the current state. +// + +// If the state cannot be read, State returns an error. +func (wd *WorkingDir) State() (*tfjson.State, error) { + return wd.tf.Show(context.Background(), tfexec.Reattach(wd.reattachInfo)) +} + +// Import runs terraform import +func (wd *WorkingDir) Import(resource, id string) error { + return wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) +} + +// Refresh runs terraform refresh +func (wd *WorkingDir) Refresh() error { + return wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.State(filepath.Join(wd.baseDir, "terraform.tfstate"))) +} + +// Schemas returns an object describing the provider schemas. +// +// If the schemas cannot be read, Schemas returns an error. +func (wd *WorkingDir) Schemas() (*tfjson.ProviderSchemas, error) { + return wd.tf.ProvidersSchema(context.Background()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go new file mode 100644 index 00000000..2201d0c0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go @@ -0,0 +1,56 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/hashicorp/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users. 
+func FormatCtyPath(path cty.Path) string { + var buf bytes.Buffer + for _, step := range path { + switch ts := step.(type) { + case cty.GetAttrStep: + fmt.Fprintf(&buf, ".%s", ts.Name) + case cty.IndexStep: + buf.WriteByte('[') + key := ts.Key + keyTy := key.Type() + switch { + case key.IsNull(): + buf.WriteString("null") + case !key.IsKnown(): + buf.WriteString("(not yet known)") + case keyTy == cty.Number: + bf := key.AsBigFloat() + buf.WriteString(bf.Text('g', -1)) + case keyTy == cty.String: + buf.WriteString(strconv.Quote(key.AsString())) + default: + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// FormatError is a helper function to produce a user-friendly string +// representation of certain special error types that we might want to +// include in diagnostic messages. +// +// This currently has special behavior only for cty.PathError, where a +// non-empty path is rendered in a HCL-like syntax as context. +func FormatError(err error) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return err.Error() + } + + return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go new file mode 100644 index 00000000..b063bc1d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go @@ -0,0 +1,81 @@ +package tfdiags + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// AttributeValue returns a diagnostic about an attribute value in an implied current +// configuration context. This should be returned only from functions whose +// interface specifies a clear configuration context that this will be +// resolved in. +// +// The given path is relative to the implied configuration context. To describe +// a top-level attribute, it should be a single-element cty.Path with a +// cty.GetAttrStep. It's assumed that the path is returning into a structure +// that would be produced by our conventions in the configschema package; it +// may return unexpected results for structures that can't be represented by +// configschema. +// +// Since mapping attribute paths back onto configuration is an imprecise +// operation (e.g. dynamic block generation may cause the same block to be +// evaluated multiple times) the diagnostic detail should include the attribute +// name and other context required to help the user understand what is being +// referenced in case the identified source range is not unique. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. 
In +// some cases however, we may want to know just the name of the attribute that +// generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is currently the missing item +// range of the body. In future, this may change to some other suitable +// part of the containing body. +func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go new file mode 100644 index 00000000..a36da370 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go @@ -0,0 +1,20 @@ +package tfdiags + +type Diagnostic interface { + Severity() Severity + Description() Description +} + +type Severity rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +type Description struct { + Summary string + Detail string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go new file mode 100644 index 00000000..34816208 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go @@ -0,0 +1,31 @@ +package tfdiags + +// diagnosticBase can be embedded in other diagnostic structs to get +// default implementations of Severity and Description. This type also +// has default implementations of Source that return no source +// location or expression-related information, so embedders should generally +// override those method to return more useful results where possible. 
+type diagnosticBase struct { + severity Severity + summary string + detail string +} + +func (d diagnosticBase) Severity() Severity { + return d.severity +} + +func (d diagnosticBase) Description() Description { + return Description{ + Summary: d.summary, + Detail: d.detail, + } +} + +func Diag(sev Severity, summary, detail string) Diagnostic { + return &diagnosticBase{ + severity: sev, + summary: summary, + detail: detail, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go new file mode 100644 index 00000000..925c14fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go @@ -0,0 +1,211 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "sort" +) + +// Diagnostics is a list of diagnostics. Diagnostics is intended to be used +// where a Go "error" might normally be used, allowing richer information +// to be conveyed (more context, support for warnings). +// +// A nil Diagnostics is a valid, empty diagnostics list, thus allowing +// heap allocation to be avoided in the common case where there are no +// diagnostics to report at all. +type Diagnostics []Diagnostic + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. +func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Error() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +// ErrWithWarnings is similar to Err except that it will also return a non-nil +// error if the receiver contains only warnings. +// +// In the warnings-only situation, the result is guaranteed to be of dynamic +// type NonFatalError, allowing diagnostics-aware callers to type-assert +// and unwrap it, treating it as non-fatal. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. 
+func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + + switch { + case iSev != jSev: + return iSev == Warning + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/tfdiags/doc.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go new file mode 100644 index 00000000..b63c5d74 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go @@ -0,0 +1,24 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func FromError(err error) Diagnostic { + return &nativeError{ + err: err, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go new file mode 100644 index 00000000..1b78887e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go @@ -0,0 +1,41 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiag struct { + Severity_ Severity + Summary_ string + Detail_ string +} + +// rpcFriendlyDiag transforms a given diagnostic so that is more friendly to +// RPC. +// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later. 
+func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + return &rpcFriendlyDiag{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + } +} + +func (d *rpcFriendlyDiag) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiag) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go new file mode 100644 index 00000000..09191704 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go @@ -0,0 +1,20 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. +func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go new file mode 100644 index 00000000..eb7b3f08 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -0,0 +1,36 @@ +// The meta package provides a location to set the release version +// and any other relevant metadata for the SDK. +// +// This package should not import any other SDK packages. +package meta + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +var SDKVersion = "2.4.3" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var SDKPrerelease = "" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. 
+var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(SDKVersion)) +} + +// VersionString returns the complete version string, including prerelease +func SDKVersionString() string { + if SDKPrerelease != "" { + return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease) + } + return SDKVersion +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go new file mode 100644 index 00000000..fb4f8140 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go @@ -0,0 +1,116 @@ +package plugin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/signal" + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-plugin" +) + +// ReattachConfig holds the information Terraform needs to be able to attach +// itself to a provider process, so it can drive the process. +type ReattachConfig struct { + Protocol string + Pid int + Test bool + Addr ReattachConfigAddr +} + +// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. +type ReattachConfigAddr struct { + Network string + String string +} + +// DebugServe starts a plugin server in debug mode; this should only be used +// when the provider will manage its own lifecycle. It is not recommended for +// normal usage; Serve is the correct function for that. +func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan struct{}, error) { + reattachCh := make(chan *plugin.ReattachConfig) + closeCh := make(chan struct{}) + + opts.TestConfig = &plugin.ServeTestConfig{ + Context: ctx, + ReattachConfigCh: reattachCh, + CloseCh: closeCh, + } + + go Serve(opts) + + var config *plugin.ReattachConfig + select { + case config = <-reattachCh: + case <-time.After(2 * time.Second): + return ReattachConfig{}, closeCh, errors.New("timeout waiting on reattach config") + } + + if config == nil { + return ReattachConfig{}, closeCh, errors.New("nil reattach config received") + } + + return ReattachConfig{ + Protocol: string(config.Protocol), + Pid: config.Pid, + Test: config.Test, + Addr: ReattachConfigAddr{ + Network: config.Addr.Network(), + String: config.Addr.String(), + }, + }, closeCh, nil +} + +// Debug starts a debug server and controls its lifecycle, printing the +// information needed for Terraform to connect to the provider to stdout. +// os.Interrupt will be captured and used to stop the server. 
+func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { + ctx, cancel := context.WithCancel(ctx) + // Ctrl-C will stop the server + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer func() { + signal.Stop(sigCh) + cancel() + }() + config, closeCh, err := DebugServe(ctx, opts) + if err != nil { + return fmt.Errorf("Error launching debug server: %w", err) + } + go func() { + select { + case <-sigCh: + cancel() + case <-ctx.Done(): + } + }() + reattachBytes, err := json.Marshal(map[string]ReattachConfig{ + providerAddr: config, + }) + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + reattachStr := string(reattachBytes) + + fmt.Printf("Provider started, to attach Terraform set the TF_REATTACH_PROVIDERS env var:\n\n") + switch runtime.GOOS { + case "windows": + fmt.Printf("\tCommand Prompt:\tset \"TF_REATTACH_PROVIDERS=%s\"\n", reattachStr) + fmt.Printf("\tPowerShell:\t$env:TF_REATTACH_PROVIDERS='%s'\n", strings.ReplaceAll(reattachStr, `'`, `''`)) + case "linux", "darwin": + fmt.Printf("\tTF_REATTACH_PROVIDERS='%s'\n", strings.ReplaceAll(reattachStr, `'`, `'"'"'`)) + default: + fmt.Println(reattachStr) + } + fmt.Println("") + + // wait for the server to be done + <-closeCh + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go new file mode 100644 index 00000000..baaab2d1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -0,0 +1,99 @@ +package plugin + +import ( + "log" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + tf5server "github.com/hashicorp/terraform-plugin-go/tfprotov5/server" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The magic cookie values should NEVER be changed. + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() *schema.Provider +type GRPCProviderFunc func() tfprotov5.ProviderServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + + // Wrapped versions of the above plugins will automatically shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc + + // Logger is the logger that go-plugin will use. + Logger hclog.Logger + + // TestConfig should only be set when the provider is being tested; it + // will opt out of go-plugin's lifecycle management and other features, + // and will use the supplied configuration options to control the + // plugin's lifecycle and communicate connection information. See the + // go-plugin GoDoc for more information. + TestConfig *plugin.ServeTestConfig + + // Set NoLogOutputOverride to not override the log output with an hclog + // adapter. This should only be used when running the plugin in + // acceptance tests. + NoLogOutputOverride bool +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. 
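+//
+// A minimal usage sketch, assuming the provider package defines its own
+// Provider() *schema.Provider constructor (that constructor name is
+// illustrative, not defined in this file):
+//
+//	func main() {
+//		plugin.Serve(&plugin.ServeOpts{
+//			ProviderFunc: Provider,
+//		})
+//	}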
+func Serve(opts *ServeOpts) { + if !opts.NoLogOutputOverride { + // In order to allow go-plugin to correctly pass log-levels through to + // terraform, we need to use an hclog.Logger with JSON output. We can + // inject this into the std `log` package here, so existing providers will + // make use of it automatically. + logger := hclog.New(&hclog.LoggerOptions{ + // We send all output to terraform. Go-plugin will take the output and + // pass it through another hclog.Logger on the client side where it can + // be filtered. + Level: hclog.Trace, + JSONFormat: true, + }) + log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) + } + + // since the plugins may not yet be aware of the new protocol, we + // automatically wrap the plugins in the grpc shims. + if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + opts.GRPCProviderFunc = func() tfprotov5.ProviderServer { + return schema.NewGRPCProviderServer(opts.ProviderFunc()) + } + } + + provider := opts.GRPCProviderFunc() + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: map[int]plugin.PluginSet{ + 5: { + ProviderPluginName: &tf5server.GRPCProviderPlugin{ + GRPCProvider: func() tfprotov5.ProviderServer { + return provider + }, + }, + }, + }, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) + }, + Logger: opts.Logger, + Test: opts.TestConfig, + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go new file mode 100644 index 00000000..c7d82cf3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go @@ -0,0 +1,990 @@ +package terraform + +import ( + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +// diffChangeType is an enum with the kind of changes a diff has planned. +type diffChangeType byte + +const ( + diffInvalid diffChangeType = iota + diffNone + diffCreate + diffUpdate + diffDestroy + diffDestroyCreate +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // meant to be used for additional data a resource may want to pass through. + // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. 
+func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create an InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. + attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. + result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. + if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. 
+ if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and ony show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 && keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. + if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. 
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. + if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != hcl2shim.UnknownVariableValue && + diff.Old != hcl2shim.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." 
+ } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." + idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. 
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type diffAttrType +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type diffAttrType byte + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +// ChangeType returns the diffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() diffChangeType { + if d.Empty() { + return diffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return diffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return diffDestroy + } + + if d.RequiresNew() { + return diffCreate + } + + return diffUpdate +} + +// Empty returns true if this diff encapsulates no changes. 
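+// A nil receiver is treated as empty, as is a diff whose Destroy,
+// DestroyTainted and DestroyDeposed flags are all false and whose Attributes
+// map has no entries.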
+func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +// TODO: investigate why removing this unused method causes panic in tests +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). +func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). 
+ ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." + prefix := k[:len(k)-1] + for k2 := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. + if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k := range d.Attributes { + checkOld[k] = struct{}{} + } + for k := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) 
+ + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2 := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. + if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. + kprefix := k[:len(k)-matchLen] + for k2 := range checkOld { + if strings.HasPrefix(k2, kprefix) { + delete(checkOld, k2) + } + } + for k2 := range checkNew { + if strings.HasPrefix(k2, kprefix) { + delete(checkNew, k2) + } + } + } + + // We don't compare the values because we can't currently actually + // guarantee to generate the same value two two diffs created from + // the same state+config: we have some pesky interpolation functions + // that do not behave as pure functions (uuid, timestamp) and so they + // can be different each time a diff is produced. + // FIXME: Re-organize our config handling so that we don't re-evaluate + // expressions when we produce a second comparison diff during + // apply (for EvalCompareDiff). 
+ } + + // Check for leftover attributes + if len(checkNew) > 0 { + extras := make([]string, 0, len(checkNew)) + for attr := range checkNew { + extras = append(extras, attr) + } + return false, + fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) + } + + return true, "" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go new file mode 100644 index 00000000..b01e5a48 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go + +// instanceType is an enum of the various types of instances store in the State +type instanceType int + +const ( + typeInvalid instanceType = iota + typePrimary + typeTainted + typeDeposed +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go new file mode 100644 index 00000000..782ef90c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=instanceType instancetype.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[typeInvalid-0] + _ = x[typePrimary-1] + _ = x[typeTainted-2] + _ = x[typeDeposed-3] +} + +const _instanceType_name = "typeInvalidtypePrimarytypeTaintedtypeDeposed" + +var _instanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i instanceType) String() string { + if i < 0 || i >= instanceType(len(_instanceType_index)-1) { + return "instanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _instanceType_name[_instanceType_index[i]:_instanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go new file mode 100644 index 00000000..11b63de8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go @@ -0,0 +1,333 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. 
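+//
+// Raw and Config hold the configuration as plain Go values (maps, slices,
+// strings and so on), with unknown values represented by
+// hcl2shim.UnknownVariableValue, and ComputedKeys lists the dotted paths of
+// those unknown values within the configuration.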
+type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." 
+ } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. + check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. 
+ v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +// TODO: investigate why deleting this causes odd runtime test failures +// must be some kind of interface implementation +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go new file mode 100644 index 00000000..ec2665d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go @@ -0,0 +1,226 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// resourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type resourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType instanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// String outputs the address that parses into this address. 
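+//
+// For example, a managed-mode address with Path ["child"], Type
+// "aws_instance", Name "web" and Index 1 is rendered as
+// "module.child.aws_instance.web[1]".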
+func (r *resourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case typePrimary: + name += ".primary" + case typeDeposed: + name += ".deposed" + case typeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +func parseResourceAddress(s string) (*resourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := parseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := parseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := parseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &resourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *resourceAddress) Less(other *resourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. + return addr.Index < other.Index + + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet + + case addr.InstanceType != other.InstanceType: + // InstanceType is actually an enum, so this is just an arbitrary + // sort based on the enum numeric values, and thus not particularly + // meaningful. 
+ return addr.InstanceType < other.InstanceType + + default: + return false + + } +} + +func parseResourceIndex(s string) (int, error) { + if s == "" { + return -1, nil + } + return strconv.Atoi(s) +} + +func parseResourcePath(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ".") + path := make([]string, 0, len(parts)) + for _, s := range parts { + // Due to the limitations of the regexp match below, the path match has + // some noise in it we have to filter out :| + if s == "" || s == "module" { + continue + } + path = append(path, s) + } + return path +} + +func parseInstanceType(s string) (instanceType, error) { + switch s { + case "", "primary": + return typePrimary, nil + case "deposed": + return typeDeposed, nil + case "tainted": + return typeTainted, nil + default: + return typeInvalid, fmt.Errorf("Unexpected value for instanceType field: %q", s) + } +} + +func tokenizeResourceAddress(s string) (map[string]string, error) { + // Example of portions of the regexp below using the + // string "aws_instance.web.tainted[1]" + re := regexp.MustCompile(`\A` + + // "module.foo.module.bar" (optional) + `(?P(?:module\.(?P[^.]+)\.?)*)` + + // possibly "data.", if targeting is a data resource + `(?P(?:data\.)?)` + + // "aws_instance.web" (optional when module path specified) + `(?:(?P[^.]+)\.(?P[^.[]+))?` + + // "tainted" (optional, omission implies: "primary") + `(?:\.(?P\w+))?` + + // "1" (optional, omission implies: "0") + `(?:\[(?P\d+)\])?` + + `\z`) + + groupNames := re.SubexpNames() + rawMatches := re.FindAllStringSubmatch(s, -1) + if len(rawMatches) != 1 { + return nil, fmt.Errorf("invalid resource address %q", s) + } + + matches := make(map[string]string) + for i, m := range rawMatches[0] { + matches[groupNames[i]] = m + } + + return matches, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/terraform/resource_mode.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_mode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go similarity index 100% rename from vendor/github.com/hashicorp/terraform/terraform/resource_mode_string.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go new file mode 100644 index 00000000..ece8fc66 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go @@ -0,0 +1,26 @@ +package terraform + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. 
+ SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go new file mode 100644 index 00000000..07e5a84f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go @@ -0,0 +1,26 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. +type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go new file mode 100644 index 00000000..87f7610a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -0,0 +1,1685 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +const ( + // StateVersion is the current version for our state file + stateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). 
We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex + + // IsBinaryDrivenTest is a special flag that assists with a binary driver + // heuristic, it should not be set externally + IsBinaryDrivenTest bool +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. 
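+//
+// The given address is lowered into the legacy path form used by ModuleState,
+// so adding the instance addressed as module.child stores a module whose Path
+// is []string{"root", "child"}.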
+func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! 
FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &stateFilter{State: s} + results, err := filter.filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. 
+ for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. +func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. 
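+//
+// For example, two states with lineages "" and "abc123" are considered the
+// same lineage (a missing lineage matches anything), while "abc123" and
+// "def456" are not.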
+func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = stateVersion + } + + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } + } else { + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } + } +} + +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. 
+ if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. +type BackendState struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. +type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +func (s *RemoteState) Lock() { s.mu.Lock() } +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +func (s *OutputState) Lock() { s.mu.Lock() } +func (s *OutputState) Unlock() { s.mu.Unlock() } + +func (s *OutputState) String() string { + return fmt.Sprintf("%#v", s.Value) +} + +// Equal compares two OutputState structures for equality. nil values are +// considered equal. +func (s *OutputState) Equal(other *OutputState) bool { + if s == nil && other == nil { + return true + } + + if s == nil || other == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Sensitive != other.Sensitive { + return false + } + + if !reflect.DeepEqual(s.Value, other.Value) { + return false + } + + return true +} + +// ModuleState is used to track all the state relevant to a single +// module. Previous to Terraform 0.3, all state belonged to the "root" +// module. +type ModuleState struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*OutputState `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. 
Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*ResourceState `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + mu sync.Mutex +} + +func (s *ModuleState) Lock() { s.mu.Lock() } +func (s *ModuleState) Unlock() { s.mu.Unlock() } + +// Equal tests whether one module state is equal to another. +func (m *ModuleState) Equal(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + // Paths must be equal + if !reflect.DeepEqual(m.Path, other.Path) { + return false + } + + // Outputs must be equal + if len(m.Outputs) != len(other.Outputs) { + return false + } + for k, v := range m.Outputs { + if !other.Outputs[k].Equal(v) { + return false + } + } + + // Dependencies must be equal. This sorts these in place but + // this shouldn't cause any problems. + sort.Strings(m.Dependencies) + sort.Strings(other.Dependencies) + if len(m.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range m.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // Resources must be equal + if len(m.Resources) != len(other.Resources) { + return false + } + for k, r := range m.Resources { + otherR, ok := other.Resources[k] + if !ok { + return false + } + + if !r.Equal(otherR) { + return false + } + } + + return true +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == hcl2shim.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + names := make([]string, 0, len(m.Resources)) + for name := range m.Resources { + names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + 
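		// Illustrative example of the entry rendered just below (resource name,
		// IDs, and attribute values here are hypothetical, chosen only to show
		// the shape of the output): a tainted resource with one deposed
		// instance renders roughly as
		//
		//   aws_instance.web: (tainted) (1 deposed)
		//     ID = i-0abc1234
		//     provider = aws
		//     ami = ami-12345678
		//     Deposed ID 1 = i-0deadbeef (tainted)
		//
		// followed by a "Dependencies:" list when the resource declares any.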
buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case ManagedResourceMode: + prefix = "" + case DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func parseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. 
+ parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +// +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. 
+func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. +func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + ProviderMeta cty.Value + + // Tainted is used to mark a resource for recreation. 
+ Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. +func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. + if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. 
+ sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. +func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = hcl2shim.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + notCreated := "" + + if s == nil { + return notCreated + } + + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s.ID == "" { + return notCreated + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes. 
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go similarity index 77% rename from vendor/github.com/hashicorp/terraform/terraform/state_filter.go rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go index 2dcb11b7..01d03927 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go @@ -5,7 +5,7 @@ import ( "sort" ) -// StateFilter is responsible for filtering and searching a state. +// stateFilter is responsible for filtering and searching a state. // // This is a separate struct from State rather than a method on State // because StateFilter might create sidecar data structures to optimize @@ -15,18 +15,18 @@ import ( // Reset should be called or a new one should be allocated. StateFilter // will not watch State for changes and do this for you. If you filter after // changing the State without calling Reset, the behavior is not defined. -type StateFilter struct { +type stateFilter struct { State *State } // Filter takes the addresses specified by fs and finds all the matches. // The values of fs are resource addressing syntax that can be parsed by -// ParseResourceAddress. -func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { +// parseResourceAddress. 
+func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { // Parse all the addresses - as := make([]*ResourceAddress, len(fs)) + as := make([]*resourceAddress, len(fs)) for i, v := range fs { - a, err := ParseResourceAddress(v) + a, err := parseResourceAddress(v) if err != nil { return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) } @@ -36,12 +36,12 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { // If we weren't given any filters, then we list all if len(fs) == 0 { - as = append(as, &ResourceAddress{Index: -1}) + as = append(as, &resourceAddress{Index: -1}) } // Filter each of the address. We keep track of this in a map to // strip duplicates. - resultSet := make(map[string]*StateFilterResult) + resultSet := make(map[string]*stateFilterResult) for _, a := range as { for _, r := range f.filterSingle(a) { resultSet[r.String()] = r @@ -49,19 +49,19 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { } // Make the result list - results := make([]*StateFilterResult, 0, len(resultSet)) + results := make([]*stateFilterResult, 0, len(resultSet)) for _, v := range resultSet { results = append(results, v) } // Sort them and return - sort.Sort(StateFilterResultSlice(results)) + sort.Sort(stateFilterResultSlice(results)) return results, nil } -func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { +func (f *stateFilter) filterSingle(a *resourceAddress) []*stateFilterResult { // The slice to keep track of results - var results []*StateFilterResult + var results []*stateFilterResult // Go through modules first. modules := make([]*ModuleState, 0, len(f.State.Modules)) @@ -72,9 +72,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // Only add the module to the results if we haven't specified a type. // We also ignore the root module. if a.Type == "" && len(m.Path) > 1 { - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: m.Path[1:], - Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Address: (&resourceAddress{Path: m.Path[1:]}).String(), Value: m, }) } @@ -86,7 +86,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { for _, m := range modules { for n, r := range m.Resources { // The name in the state contains valuable information. Parse. - key, err := ParseResourceStateKey(n) + key, err := parseResourceStateKey(n) if err != nil { // If we get an error parsing, then just ignore it // out of the state. 
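// Illustrative examples (hypothetical values) of the legacy resource
// addressing syntax that the filter above is assumed to accept via
// parseResourceAddress:
//
//   aws_instance.web                  // managed resource in the root module
//   aws_instance.web[0]               // a specific instance index
//   data.aws_ami.ubuntu               // a data resource
//   module.child.aws_instance.web     // a resource inside a child module
//
// Passing no addresses at all lists every module, resource, and instance
// in the state, as handled by the empty-filter branch above.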
@@ -116,7 +116,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // Build the address for this resource - addr := &ResourceAddress{ + addr := &resourceAddress{ Path: m.Path[1:], Name: key.Name, Type: key.Type, @@ -124,7 +124,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // Add the resource level result - resourceResult := &StateFilterResult{ + resourceResult := &stateFilterResult{ Path: addr.Path, Address: addr.String(), Value: r, @@ -135,9 +135,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // Add the instances if r.Primary != nil { - addr.InstanceType = TypePrimary + addr.InstanceType = typePrimary addr.InstanceTypeSet = false - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: addr.Path, Address: addr.String(), Parent: resourceResult, @@ -147,9 +147,9 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { for _, instance := range r.Deposed { if f.relevant(a, instance) { - addr.InstanceType = TypeDeposed + addr.InstanceType = typeDeposed addr.InstanceTypeSet = true - results = append(results, &StateFilterResult{ + results = append(results, &stateFilterResult{ Path: addr.Path, Address: addr.String(), Parent: resourceResult, @@ -165,7 +165,7 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { } // relevant checks for relevance of this address against the given value. -func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { +func (f *stateFilter) relevant(addr *resourceAddress, raw interface{}) bool { switch v := raw.(type) { case *ModuleState: path := v.Path[1:] @@ -202,10 +202,10 @@ func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { } } -// StateFilterResult is a single result from a filter operation. Filter +// stateFilterResult is a single result from a filter operation. Filter // can match multiple things within a state (module, resource, instance, etc.) // and this unifies that. -type StateFilterResult struct { +type stateFilterResult struct { // Module path of the result Path []string @@ -215,7 +215,7 @@ type StateFilterResult struct { // Parent, if non-nil, is a parent of this result. For instances, the // parent would be a resource. For resources, the parent would be // a module. For modules, this is currently nil. - Parent *StateFilterResult + Parent *stateFilterResult // Value is the actual value. This must be type switched on. It can be // any data structures that `State` can hold: `ModuleState`, @@ -223,11 +223,11 @@ type StateFilterResult struct { Value interface{} } -func (r *StateFilterResult) String() string { +func (r *stateFilterResult) String() string { return fmt.Sprintf("%T: %s", r.Value, r.Address) } -func (r *StateFilterResult) sortedType() int { +func (r *stateFilterResult) sortedType() int { switch r.Value.(type) { case *ModuleState: return 0 @@ -240,19 +240,19 @@ func (r *StateFilterResult) sortedType() int { } } -// StateFilterResultSlice is a slice of results that implements +// stateFilterResultSlice is a slice of results that implements // sort.Interface. The sorting goal is what is most appealing to // human output. 
-type StateFilterResultSlice []*StateFilterResult +type stateFilterResultSlice []*stateFilterResult -func (s StateFilterResultSlice) Len() int { return len(s) } -func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s StateFilterResultSlice) Less(i, j int) bool { +func (s stateFilterResultSlice) Len() int { return len(s) } +func (s stateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s stateFilterResultSlice) Less(i, j int) bool { a, b := s[i], s[j] // if these address contain an index, we want to sort by index rather than name - addrA, errA := ParseResourceAddress(a.Address) - addrB, errB := ParseResourceAddress(b.Address) + addrA, errA := parseResourceAddress(a.Address) + addrB, errB := parseResourceAddress(b.Address) if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { return addrA.Index < addrB.Index } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go new file mode 100644 index 00000000..01ac810f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "sort" +) + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go deleted file mode 100644 index 0dae567d..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go +++ /dev/null @@ -1,61 +0,0 @@ -package auth - -import ( - "github.com/hashicorp/terraform-svchost" -) - -// CachingCredentialsSource creates a new credentials source that wraps another -// and caches its results in memory, on a per-hostname basis. -// -// No means is provided for expiration of cached credentials, so a caching -// credentials source should have a limited lifetime (one Terraform operation, -// for example) to ensure that time-limited credentials don't expire before -// their cache entries do. -func CachingCredentialsSource(source CredentialsSource) CredentialsSource { - return &cachingCredentialsSource{ - source: source, - cache: map[svchost.Hostname]HostCredentials{}, - } -} - -type cachingCredentialsSource struct { - source CredentialsSource - cache map[svchost.Hostname]HostCredentials -} - -// ForHost passes the given hostname on to the wrapped credentials source and -// caches the result to return for future requests with the same hostname. -// -// Both credentials and non-credentials (nil) responses are cached. -// -// No cache entry is created if the wrapped source returns an error, to allow -// the caller to retry the failing operation. 
-func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if cache, cached := s.cache[host]; cached { - return cache, nil - } - - result, err := s.source.ForHost(host) - if err != nil { - return result, err - } - - s.cache[host] = result - return result, nil -} - -func (s *cachingCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see which object (old or new) is actually present. - delete(s.cache, host) - return s.source.StoreForHost(host, credentials) -} - -func (s *cachingCredentialsSource) ForgetForHost(host svchost.Hostname) error { - // We'll delete the cache entry even if the store fails, since that just - // means that the next read will go to the real store and get a chance to - // see if the object is still present. - delete(s.cache, host) - return s.source.ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go deleted file mode 100644 index 36441cd1..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package auth contains types and functions to manage authentication -// credentials for service hosts. -package auth - -import ( - "fmt" - "net/http" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform-svchost" -) - -// Credentials is a list of CredentialsSource objects that can be tried in -// turn until one returns credentials for a host, or one returns an error. -// -// A Credentials is itself a CredentialsSource, wrapping its members. -// In principle one CredentialsSource can be nested inside another, though -// there is no good reason to do so. -// -// The write operations on a Credentials are tried only on the first object, -// under the assumption that it is the primary store. -type Credentials []CredentialsSource - -// NoCredentials is an empty CredentialsSource that always returns nil -// when asked for credentials. -var NoCredentials CredentialsSource = Credentials{} - -// A CredentialsSource is an object that may be able to provide credentials -// for a given host. -// -// Credentials lookups are not guaranteed to be concurrency-safe. Callers -// using these facilities in concurrent code must use external concurrency -// primitives to prevent race conditions. -type CredentialsSource interface { - // ForHost returns a non-nil HostCredentials if the source has credentials - // available for the host, and a nil HostCredentials if it does not. - // - // If an error is returned, progress through a list of CredentialsSources - // is halted and the error is returned to the user. - ForHost(host svchost.Hostname) (HostCredentials, error) - - // StoreForHost takes a HostCredentialsWritable and saves it as the - // credentials for the given host. - // - // If credentials are already stored for the given host, it will try to - // replace those credentials but may produce an error if such replacement - // is not possible. - StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error - - // ForgetForHost discards any stored credentials for the given host. It - // does nothing and returns successfully if no credentials are saved - // for that host. 
- ForgetForHost(host svchost.Hostname) error -} - -// HostCredentials represents a single set of credentials for a particular -// host. -type HostCredentials interface { - // PrepareRequest modifies the given request in-place to apply the - // receiving credentials. The usual behavior of this method is to - // add some sort of Authorization header to the request. - PrepareRequest(req *http.Request) - - // Token returns the authentication token. - Token() string -} - -// HostCredentialsWritable is an extension of HostCredentials for credentials -// objects that can be serialized as a JSON-compatible object value for -// storage. -type HostCredentialsWritable interface { - HostCredentials - - // ToStore returns a cty.Value, always of an object type, - // representing data that can be serialized to represent this object - // in persistent storage. - // - // The resulting value may uses only cty values that can be accepted - // by the cty JSON encoder, though the caller may elect to instead store - // it in some other format that has a JSON-compatible type system. - ToStore() cty.Value -} - -// ForHost iterates over the contained CredentialsSource objects and -// tries to obtain credentials for the given host from each one in turn. -// -// If any source returns either a non-nil HostCredentials or a non-nil error -// then this result is returned. Otherwise, the result is nil, nil. -func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { - for _, source := range c { - creds, err := source.ForHost(host) - if creds != nil || err != nil { - return creds, err - } - } - return nil, nil -} - -// StoreForHost passes the given arguments to the same operation on the -// first CredentialsSource in the receiver. -func (c Credentials) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - if len(c) == 0 { - return fmt.Errorf("no credentials store is available") - } - - return c[0].StoreForHost(host, credentials) -} - -// ForgetForHost passes the given arguments to the same operation on the -// first CredentialsSource in the receiver. -func (c Credentials) ForgetForHost(host svchost.Hostname) error { - if len(c) == 0 { - return fmt.Errorf("no credentials store is available") - } - - return c[0].ForgetForHost(host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go deleted file mode 100644 index 7198c674..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go +++ /dev/null @@ -1,48 +0,0 @@ -package auth - -import ( - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsFromMap converts a map of key-value pairs from a credentials -// definition provided by the user (e.g. in a config file, or via a credentials -// helper) into a HostCredentials object if possible, or returns nil if -// no credentials could be extracted from the map. -// -// This function ignores map keys it is unfamiliar with, to allow for future -// expansion of the credentials map format for new credential types. -func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { - if m == nil { - return nil - } - if token, ok := m["token"].(string); ok { - return HostCredentialsToken(token) - } - return nil -} - -// HostCredentialsFromObject converts a cty.Value of an object type into a -// HostCredentials object if possible, or returns nil if no credentials could -// be extracted from the map. 
-// -// This function ignores object attributes it is unfamiliar with, to allow for -// future expansion of the credentials object structure for new credential types. -// -// If the given value is not of an object type, this function will panic. -func HostCredentialsFromObject(obj cty.Value) HostCredentials { - if !obj.Type().HasAttribute("token") { - return nil - } - - tokenV := obj.GetAttr("token") - if tokenV.IsNull() || !tokenV.IsKnown() { - return nil - } - if !cty.String.Equals(tokenV.Type()) { - // Weird, but maybe some future Terraform version accepts an object - // here for some reason, so we'll be resilient. - return nil - } - - return HostCredentialsToken(tokenV.AsString()) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go deleted file mode 100644 index 76505f20..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go +++ /dev/null @@ -1,149 +0,0 @@ -package auth - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "path/filepath" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform-svchost" -) - -type helperProgramCredentialsSource struct { - executable string - args []string -} - -// HelperProgramCredentialsSource returns a CredentialsSource that runs the -// given program with the given arguments in order to obtain credentials. -// -// The given executable path must be an absolute path; it is the caller's -// responsibility to validate and process a relative path or other input -// provided by an end-user. If the given path is not absolute, this -// function will panic. -// -// When credentials are requested, the program will be run in a child process -// with the given arguments along with two additional arguments added to the -// end of the list: the literal string "get", followed by the requested -// hostname in ASCII compatibility form (punycode form). 
-func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { - if !filepath.IsAbs(executable) { - panic("NewCredentialsSourceHelperProgram requires absolute path to executable") - } - - fullArgs := make([]string, len(args)+1) - fullArgs[0] = executable - copy(fullArgs[1:], args) - - return &helperProgramCredentialsSource{ - executable: executable, - args: fullArgs, - } -} - -func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "get") - args = append(args, string(host)) - - outBuf := bytes.Buffer{} - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stdout: &outBuf, - Stderr: &errBuf, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return nil, fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - var m map[string]interface{} - err = json.Unmarshal(outBuf.Bytes(), &m) - if err != nil { - return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) - } - - return HostCredentialsFromMap(m), nil -} - -func (s *helperProgramCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "store") - args = append(args, string(host)) - - toStore := credentials.ToStore() - toStoreRaw, err := ctyjson.Marshal(toStore, toStore.Type()) - if err != nil { - return fmt.Errorf("can't serialize credentials to store: %s", err) - } - - inReader := bytes.NewReader(toStoreRaw) - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: inReader, - Stderr: &errBuf, - Stdout: nil, - } - err = cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} - -func (s *helperProgramCredentialsSource) ForgetForHost(host svchost.Hostname) error { - args := make([]string, len(s.args), len(s.args)+2) - copy(args, s.args) - args = append(args, "forget") - args = append(args, string(host)) - - errBuf := bytes.Buffer{} - - cmd := exec.Cmd{ - Path: s.executable, - Args: args, - Stdin: nil, - Stderr: &errBuf, - Stdout: nil, - } - err := cmd.Run() - if _, isExitErr := err.(*exec.ExitError); isExitErr { - errText := errBuf.String() - if errText == "" { - // Shouldn't happen for a well-behaved helper program - return fmt.Errorf("error in %s, but it produced no error message", s.executable) - } - return fmt.Errorf("error in %s: %s", s.executable, errText) - } else if err != nil { - return fmt.Errorf("failed to run %s: %s", s.executable, err) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go deleted file mode 100644 index f8b0b076..00000000 --- 
a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go +++ /dev/null @@ -1,38 +0,0 @@ -package auth - -import ( - "fmt" - - "github.com/hashicorp/terraform-svchost" -) - -// StaticCredentialsSource is a credentials source that retrieves credentials -// from the provided map. It returns nil if a requested hostname is not -// present in the map. -// -// The caller should not modify the given map after passing it to this function. -func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { - return staticCredentialsSource(creds) -} - -type staticCredentialsSource map[svchost.Hostname]map[string]interface{} - -func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { - if s == nil { - return nil, nil - } - - if m, exists := s[host]; exists { - return HostCredentialsFromMap(m), nil - } - - return nil, nil -} - -func (s staticCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { - return fmt.Errorf("can't store new credentials in a static credentials source") -} - -func (s staticCredentialsSource) ForgetForHost(host svchost.Hostname) error { - return fmt.Errorf("can't discard credentials from a static credentials source") -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go deleted file mode 100644 index 1d36553a..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go +++ /dev/null @@ -1,43 +0,0 @@ -package auth - -import ( - "net/http" - - "github.com/zclconf/go-cty/cty" -) - -// HostCredentialsToken is a HostCredentials implementation that represents a -// single "bearer token", to be sent to the server via an Authorization header -// with the auth type set to "Bearer". -// -// To save a token as the credentials for a host, convert the token string to -// this type and use the result as a HostCredentialsWritable implementation. -type HostCredentialsToken string - -// Interface implementation assertions. Compilation will fail here if -// HostCredentialsToken does not fully implement these interfaces. -var _ HostCredentials = HostCredentialsToken("") -var _ HostCredentialsWritable = HostCredentialsToken("") - -// PrepareRequest alters the given HTTP request by setting its Authorization -// header to the string "Bearer " followed by the encapsulated authentication -// token. -func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { - if req.Header == nil { - req.Header = http.Header{} - } - req.Header.Set("Authorization", "Bearer "+string(tc)) -} - -// Token returns the authentication token. -func (tc HostCredentialsToken) Token() string { - return string(tc) -} - -// ToStore returns a credentials object with a single attribute "token" whose -// value is the token string. -func (tc HostCredentialsToken) ToStore() cty.Value { - return cty.ObjectVal(map[string]cty.Value{ - "token": cty.StringVal(string(tc)), - }) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go deleted file mode 100644 index 97831363..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go +++ /dev/null @@ -1,275 +0,0 @@ -// Package disco handles Terraform's remote service discovery protocol. 
-// -// This protocol allows mapping from a service hostname, as produced by the -// svchost package, to a set of services supported by that host and the -// endpoint information for each supported service. -package disco - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net/http" - "net/url" - "time" - - "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/auth" -) - -const ( - // Fixed path to the discovery manifest. - discoPath = "/.well-known/terraform.json" - - // Arbitrary-but-small number to prevent runaway redirect loops. - maxRedirects = 3 - - // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. - discoTimeout = 11 * time.Second - - // 1MB - to prevent abusive services from using loads of our memory. - maxDiscoDocBytes = 1 * 1024 * 1024 -) - -// httpTransport is overridden during tests, to skip TLS verification. -var httpTransport = defaultHttpTransport() - -// Disco is the main type in this package, which allows discovery on given -// hostnames and caches the results by hostname to avoid repeated requests -// for the same information. -type Disco struct { - hostCache map[svchost.Hostname]*Host - credsSrc auth.CredentialsSource - - // Transport is a custom http.RoundTripper to use. - Transport http.RoundTripper -} - -// New returns a new initialized discovery object. -func New() *Disco { - return NewWithCredentialsSource(nil) -} - -// NewWithCredentialsSource returns a new discovery object initialized with -// the given credentials source. -func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { - return &Disco{ - hostCache: make(map[svchost.Hostname]*Host), - credsSrc: credsSrc, - Transport: httpTransport, - } -} - -func (d *Disco) SetUserAgent(uaString string) { - d.Transport = &userAgentRoundTripper{ - innerRt: d.Transport, - userAgent: uaString, - } -} - -// SetCredentialsSource provides a credentials source that will be used to -// add credentials to outgoing discovery requests, where available. -// -// If this method is never called, no outgoing discovery requests will have -// credentials. -func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { - d.credsSrc = src -} - -// CredentialsSource returns the credentials source associated with the receiver, -// or an empty credentials source if none is associated. -func (d *Disco) CredentialsSource() auth.CredentialsSource { - if d.credsSrc == nil { - // We'll return an empty one just to save the caller from having to - // protect against the nil case, since this interface already allows - // for the possibility of there being no credentials at all. - return auth.StaticCredentialsSource(nil) - } - return d.credsSrc -} - -// CredentialsForHost returns a non-nil HostCredentials if the embedded source has -// credentials available for the host, and a nil HostCredentials if it does not. -func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { - if d.credsSrc == nil { - return nil, nil - } - return d.credsSrc.ForHost(hostname) -} - -// ForceHostServices provides a pre-defined set of services for a given -// host, which prevents the receiver from attempting network-based discovery -// for the given host. Instead, the given services map will be returned -// verbatim. 
-// -// When providing "forced" services, any relative URLs are resolved against -// the initial discovery URL that would have been used for network-based -// discovery, yielding the same results as if the given map were published -// at the host's default discovery URL, though using absolute URLs is strongly -// recommended to make the configured behavior more explicit. -func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { - if services == nil { - services = map[string]interface{}{} - } - - d.hostCache[hostname] = &Host{ - discoURL: &url.URL{ - Scheme: "https", - Host: string(hostname), - Path: discoPath, - }, - hostname: hostname.ForDisplay(), - services: services, - transport: d.Transport, - } -} - -// Discover runs the discovery protocol against the given hostname (which must -// already have been validated and prepared with svchost.ForComparison) and -// returns an object describing the services available at that host. -// -// If a given hostname supports no Terraform services at all, a non-nil but -// empty Host object is returned. When giving feedback to the end user about -// such situations, we say "host does not provide a service", -// regardless of whether that is due to that service specifically being absent -// or due to the host not providing Terraform services at all, since we don't -// wish to expose the detail of whole-host discovery to an end-user. -func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { - if host, cached := d.hostCache[hostname]; cached { - return host, nil - } - - host, err := d.discover(hostname) - if err != nil { - return nil, err - } - d.hostCache[hostname] = host - - return host, nil -} - -// DiscoverServiceURL is a convenience wrapper for discovery on a given -// hostname and then looking up a particular service in the result. -func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { - host, err := d.Discover(hostname) - if err != nil { - return nil, err - } - return host.ServiceURL(serviceID) -} - -// discover implements the actual discovery process, with its result cached -// by the public-facing Discover method. -func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { - discoURL := &url.URL{ - Scheme: "https", - Host: hostname.String(), - Path: discoPath, - } - - client := &http.Client{ - Transport: d.Transport, - Timeout: discoTimeout, - - CheckRedirect: func(req *http.Request, via []*http.Request) error { - log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) - if len(via) > maxRedirects { - return errors.New("too many redirects") // this error will never actually be seen - } - return nil - }, - } - - req := &http.Request{ - Header: make(http.Header), - Method: "GET", - URL: discoURL, - } - req.Header.Set("Accept", "application/json") - - creds, err := d.CredentialsForHost(hostname) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) - } - if creds != nil { - // Update the request to include credentials. - creds.PrepareRequest(req) - } - - log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request discovery document: %v", err) - } - defer resp.Body.Close() - - host := &Host{ - // Use the discovery URL from resp.Request in - // case the client followed any redirects. 
- discoURL: resp.Request.URL, - hostname: hostname.ForDisplay(), - transport: d.Transport, - } - - // Return the host without any services. - if resp.StatusCode == 404 { - return host, nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) - } - - contentType := resp.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) - } - if mediaType != "application/json" { - return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) - } - - // This doesn't catch chunked encoding, because ContentLength is -1 in that case. - if resp.ContentLength > maxDiscoDocBytes { - // Size limit here is not a contractual requirement and so we may - // adjust it over time if we find a different limit is warranted. - return nil, fmt.Errorf( - "Discovery doc response is too large (got %d bytes; limit %d)", - resp.ContentLength, maxDiscoDocBytes, - ) - } - - // If the response is using chunked encoding then we can't predict its - // size, but we'll at least prevent reading the entire thing into memory. - lr := io.LimitReader(resp.Body, maxDiscoDocBytes) - - servicesBytes, err := ioutil.ReadAll(lr) - if err != nil { - return nil, fmt.Errorf("Error reading discovery document body: %v", err) - } - - var services map[string]interface{} - err = json.Unmarshal(servicesBytes, &services) - if err != nil { - return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) - } - host.services = services - - return host, nil -} - -// Forget invalidates any cached record of the given hostname. If the host -// has no cache entry then this is a no-op. -func (d *Disco) Forget(hostname svchost.Hostname) { - delete(d.hostCache, hostname) -} - -// ForgetAll is like Forget, but for all of the hostnames that have cache entries. -func (d *Disco) ForgetAll() { - d.hostCache = make(map[svchost.Hostname]*Host) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go b/vendor/github.com/hashicorp/terraform-svchost/disco/host.go deleted file mode 100644 index 2d0fc9f1..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go +++ /dev/null @@ -1,412 +0,0 @@ -package disco - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-version" -) - -const versionServiceID = "versions.v1" - -// Host represents a service discovered host. -type Host struct { - discoURL *url.URL - hostname string - services map[string]interface{} - transport http.RoundTripper -} - -// Constraints represents the version constraints of a service. -type Constraints struct { - Service string `json:"service"` - Product string `json:"product"` - Minimum string `json:"minimum"` - Maximum string `json:"maximum"` - Excluding []string `json:"excluding"` -} - -// ErrServiceNotProvided is returned when the service is not provided. -type ErrServiceNotProvided struct { - hostname string - service string -} - -// Error returns a customized error message. -func (e *ErrServiceNotProvided) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not provide a %s service", e.service) - } - return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) -} - -// ErrVersionNotSupported is returned when the version is not supported. 
-type ErrVersionNotSupported struct { - hostname string - service string - version string -} - -// Error returns a customized error message. -func (e *ErrVersionNotSupported) Error() string { - if e.hostname == "" { - return fmt.Sprintf("host does not support %s version %s", e.service, e.version) - } - return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) -} - -// ErrNoVersionConstraints is returned when checkpoint was disabled -// or the endpoint to query for version constraints was unavailable. -type ErrNoVersionConstraints struct { - disabled bool -} - -// Error returns a customized error message. -func (e *ErrNoVersionConstraints) Error() string { - if e.disabled { - return "checkpoint disabled" - } - return "unable to contact versions service" -} - -// ServiceURL returns the URL associated with the given service identifier, -// which should be of the form "servicename.vN". -// -// A non-nil result is always an absolute URL with a scheme of either HTTPS -// or HTTP. -func (h *Host) ServiceURL(id string) (*url.URL, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - urlStr, ok := h.services[id].(string) - if !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse service URL: %v", err) - } - - return u, nil -} - -// ServiceOAuthClient returns the OAuth client configuration associated with the -// given service identifier, which should be of the form "servicename.vN". -// -// This is an alternative to ServiceURL for unusual services that require -// a full OAuth2 client definition rather than just a URL. Use this only -// for services whose specification calls for this sort of definition. -func (h *Host) ServiceOAuthClient(id string) (*OAuthClient, error) { - svc, ver, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - if _, ok := h.services[id]; !ok { - // See if we have a matching service as that would indicate - // the service is supported, but not the requested version. - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - return nil, &ErrVersionNotSupported{ - hostname: h.hostname, - service: svc, - version: ver.Original(), - } - } - } - - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - var raw map[string]interface{} - switch v := h.services[id].(type) { - case map[string]interface{}: - raw = v // Great! - case []map[string]interface{}: - // An absolutely infuriating legacy HCL ambiguity. - raw = v[0] - default: - // Debug message because raw Go types don't belong in our UI. 
- log.Printf("[DEBUG] The definition for %s has Go type %T", id, h.services[id]) - return nil, fmt.Errorf("Service %s must be declared with an object value in the service discovery document", id) - } - - var grantTypes OAuthGrantTypeSet - if rawGTs, ok := raw["grant_types"]; ok { - if gts, ok := rawGTs.([]interface{}); ok { - var kws []string - for _, gtI := range gts { - gt, ok := gtI.(string) - if !ok { - // We'll ignore this so that we can potentially introduce - // other types into this array later if we need to. - continue - } - kws = append(kws, gt) - } - grantTypes = NewOAuthGrantTypeSet(kws...) - } else { - return nil, fmt.Errorf("Service %s is defined with invalid grant_types property: must be an array of grant type strings", id) - } - } else { - grantTypes = NewOAuthGrantTypeSet("authz_code") - } - - ret := &OAuthClient{ - SupportedGrantTypes: grantTypes, - } - if clientIDStr, ok := raw["client"].(string); ok { - ret.ID = clientIDStr - } else { - return nil, fmt.Errorf("Service %s definition is missing required property \"client\"", id) - } - if urlStr, ok := raw["authz"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse authorization URL: %v", err) - } - ret.AuthorizationURL = u - } else { - if grantTypes.RequiresAuthorizationEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"authz\"", id) - } - } - if urlStr, ok := raw["token"].(string); ok { - u, err := h.parseURL(urlStr) - if err != nil { - return nil, fmt.Errorf("Failed to parse token URL: %v", err) - } - ret.TokenURL = u - } else { - if grantTypes.RequiresTokenEndpoint() { - return nil, fmt.Errorf("Service %s definition is missing required property \"token\"", id) - } - } - if portsRaw, ok := raw["ports"].([]interface{}); ok { - if len(portsRaw) != 2 { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: must be a two-element array", id) - } - invalidPortsErr := fmt.Errorf("Invalid \"ports\" definition for service %s: both ports must be whole numbers between 1024 and 65535", id) - ports := make([]uint16, 2) - for i := range ports { - switch v := portsRaw[i].(type) { - case float64: - // JSON unmarshaling always produces float64. HCL 2 might, if - // an invalid fractional number were given. - if float64(uint16(v)) != v || v < 1024 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - case int: - // Legacy HCL produces int. HCL 2 will too, if the given number - // is a whole number. - if v < 1024 || v > 65535 { - return nil, invalidPortsErr - } - ports[i] = uint16(v) - default: - // Debug message because raw Go types don't belong in our UI. - log.Printf("[DEBUG] Port value %d has Go type %T", i, portsRaw[i]) - return nil, invalidPortsErr - } - } - if ports[1] < ports[0] { - return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: minimum port cannot be greater than maximum port", id) - } - ret.MinPort = ports[0] - ret.MaxPort = ports[1] - } else { - // Default is to accept any port in the range, for a client that is - // able to call back to any localhost port. - ret.MinPort = 1024 - ret.MaxPort = 65535 - } - - return ret, nil -} - -func (h *Host) parseURL(urlStr string) (*url.URL, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - // Make relative URLs absolute using our discovery URL. 
- if !u.IsAbs() { - u = h.discoURL.ResolveReference(u) - } - - if u.Scheme != "https" && u.Scheme != "http" { - return nil, fmt.Errorf("unsupported scheme %s", u.Scheme) - } - if u.User != nil { - return nil, fmt.Errorf("embedded username/password information is not permitted") - } - - // Fragment part is irrelevant, since we're not a browser. - u.Fragment = "" - - return u, nil -} - -// VersionConstraints returns the contraints for a given service identifier -// (which should be of the form "servicename.vN") and product. -// -// When an exact (service and version) match is found, the constraints for -// that service are returned. -// -// When the requested version is not provided but the service is, we will -// search for all alternative versions. If mutliple alternative versions -// are found, the contrains of the latest available version are returned. -// -// When a service is not provided at all an error will be returned instead. -// -// When checkpoint is disabled or when a 404 is returned after making the -// HTTP call, an ErrNoVersionConstraints error will be returned. -func (h *Host) VersionConstraints(id, product string) (*Constraints, error) { - svc, _, err := parseServiceID(id) - if err != nil { - return nil, err - } - - // Return early if checkpoint is disabled. - if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { - return nil, &ErrNoVersionConstraints{disabled: true} - } - - // No services supported for an empty Host. - if h == nil || h.services == nil { - return nil, &ErrServiceNotProvided{service: svc} - } - - // Try to get the service URL for the version service and - // return early if the service isn't provided by the host. - u, err := h.ServiceURL(versionServiceID) - if err != nil { - return nil, err - } - - // Check if we have an exact (service and version) match. - if _, ok := h.services[id].(string); !ok { - // If we don't have an exact match, we search for all matching - // services and then use the service ID of the latest version. - var services []string - for serviceID := range h.services { - if strings.HasPrefix(serviceID, svc+".") { - services = append(services, serviceID) - } - } - - if len(services) == 0 { - // No discovered services match the requested service. - return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} - } - - // Set id to the latest service ID we found. - var latest *version.Version - for _, serviceID := range services { - if _, ver, err := parseServiceID(serviceID); err == nil { - if latest == nil || latest.LessThan(ver) { - id = serviceID - latest = ver - } - } - } - } - - // Set a default timeout of 1 sec for the versions request (in milliseconds) - timeout := 1000 - if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { - timeout = v - } - - client := &http.Client{ - Transport: h.transport, - Timeout: time.Duration(timeout) * time.Millisecond, - } - - // Prepare the service URL by setting the service and product. - v := u.Query() - v.Set("product", product) - u.Path += id - u.RawQuery = v.Encode() - - // Create a new request. 
- req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, fmt.Errorf("Failed to create version constraints request: %v", err) - } - req.Header.Set("Accept", "application/json") - - log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("Failed to request version constraints: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 { - return nil, &ErrNoVersionConstraints{disabled: false} - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) - } - - // Parse the constraints from the response body. - result := &Constraints{} - if err := json.NewDecoder(resp.Body).Decode(result); err != nil { - return nil, fmt.Errorf("Error parsing version constraints: %v", err) - } - - return result, nil -} - -func parseServiceID(id string) (string, *version.Version, error) { - parts := strings.SplitN(id, ".", 2) - if len(parts) != 2 { - return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) - } - - version, err := version.NewVersion(parts[1]) - if err != nil { - return "", nil, fmt.Errorf("Invalid service version: %v", err) - } - - return parts[0], version, nil -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go b/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go deleted file mode 100644 index 7e4a3856..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go +++ /dev/null @@ -1,30 +0,0 @@ -package disco - -import ( - "net/http" - - "github.com/hashicorp/go-cleanhttp" -) - -const DefaultUserAgent = "terraform-svchost/1.0" - -func defaultHttpTransport() http.RoundTripper { - t := cleanhttp.DefaultPooledTransport() - return &userAgentRoundTripper{ - innerRt: t, - userAgent: DefaultUserAgent, - } -} - -type userAgentRoundTripper struct { - innerRt http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - - return rt.innerRt.RoundTrip(req) -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go b/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go deleted file mode 100644 index 9308bbf7..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go +++ /dev/null @@ -1,178 +0,0 @@ -package disco - -import ( - "fmt" - "net/url" - "strings" - - "golang.org/x/oauth2" -) - -// OAuthClient represents an OAuth client configuration, which is used for -// unusual services that require an entire OAuth client configuration as part -// of their service discovery, rather than just a URL. -type OAuthClient struct { - // ID is the identifier for the client, to be used as "client_id" in - // OAuth requests. - ID string - - // Authorization URL is the URL of the authorization endpoint that must - // be used for this OAuth client, as defined in the OAuth2 specifications. - // - // Not all grant types use the authorization endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - AuthorizationURL *url.URL - - // Token URL is the URL of the token endpoint that must be used for this - // OAuth client, as defined in the OAuth2 specifications. 
- // - // Not all grant types use the token endpoint, so it may be omitted - // if none of the grant types in SupportedGrantTypes require it. - TokenURL *url.URL - - // MinPort and MaxPort define a range of TCP ports on localhost that this - // client is able to use as redirect_uri in an authorization request. - // Terraform will select a port from this range for the temporary HTTP - // server it creates to receive the authorization response, giving - // a URL like http://localhost:NNN/ where NNN is the selected port number. - // - // Terraform will reject any port numbers in this range less than 1024, - // to respect the common convention (enforced on some operating systems) - // that lower port numbers are reserved for "privileged" services. - MinPort, MaxPort uint16 - - // SupportedGrantTypes is a set of the grant types that the client may - // choose from. This includes an entry for each distinct type advertised - // by the server, even if a particular keyword is not supported by the - // current version of Terraform. - SupportedGrantTypes OAuthGrantTypeSet -} - -// Endpoint returns an oauth2.Endpoint value ready to be used with the oauth2 -// library, representing the URLs from the receiver. -func (c *OAuthClient) Endpoint() oauth2.Endpoint { - ep := oauth2.Endpoint{ - // We don't actually auth because we're not a server-based OAuth client, - // so this instead just means that we include client_id as an argument - // in our requests. - AuthStyle: oauth2.AuthStyleInParams, - } - - if c.AuthorizationURL != nil { - ep.AuthURL = c.AuthorizationURL.String() - } - if c.TokenURL != nil { - ep.TokenURL = c.TokenURL.String() - } - - return ep -} - -// OAuthGrantType is an enumeration of grant type strings that a host can -// advertise support for. -// -// Values of this type don't necessarily match with a known constant of the -// type, because they may represent grant type keywords defined in a later -// version of Terraform which this version doesn't yet know about. -type OAuthGrantType string - -const ( - // OAuthAuthzCodeGrant represents an authorization code grant, as - // defined in IETF RFC 6749 section 4.1. - OAuthAuthzCodeGrant = OAuthGrantType("authz_code") - - // OAuthOwnerPasswordGrant represents a resource owner password - // credentials grant, as defined in IETF RFC 6749 section 4.3. - OAuthOwnerPasswordGrant = OAuthGrantType("password") -) - -// UsesAuthorizationEndpoint returns true if the receiving grant type makes -// use of the authorization endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesAuthorizationEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return false - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. - return false - } -} - -// UsesTokenEndpoint returns true if the receiving grant type makes -// use of the token endpoint from the client configuration, and thus -// if the authorization endpoint ought to be required. -func (t OAuthGrantType) UsesTokenEndpoint() bool { - switch t { - case OAuthAuthzCodeGrant: - return true - case OAuthOwnerPasswordGrant: - return true - default: - // We'll default to false so that we don't impose any requirements - // on any grant type keywords that might be defined for future - // versions of Terraform. 
- return false - } -} - -// OAuthGrantTypeSet represents a set of OAuthGrantType values. -type OAuthGrantTypeSet map[OAuthGrantType]struct{} - -// NewOAuthGrantTypeSet constructs a new grant type set from the given list -// of grant type keyword strings. Any duplicates in the list are ignored. -func NewOAuthGrantTypeSet(keywords ...string) OAuthGrantTypeSet { - ret := make(OAuthGrantTypeSet, len(keywords)) - for _, kw := range keywords { - ret[OAuthGrantType(kw)] = struct{}{} - } - return ret -} - -// Has returns true if the given grant type is in the receiving set. -func (s OAuthGrantTypeSet) Has(t OAuthGrantType) bool { - _, ok := s[t] - return ok -} - -// RequiresAuthorizationEndpoint returns true if any of the grant types in -// the set are known to require an authorization endpoint. -func (s OAuthGrantTypeSet) RequiresAuthorizationEndpoint() bool { - for t := range s { - if t.UsesAuthorizationEndpoint() { - return true - } - } - return false -} - -// RequiresTokenEndpoint returns true if any of the grant types in -// the set are known to require a token endpoint. -func (s OAuthGrantTypeSet) RequiresTokenEndpoint() bool { - for t := range s { - if t.UsesTokenEndpoint() { - return true - } - } - return false -} - -// GoString implements fmt.GoStringer. -func (s OAuthGrantTypeSet) GoString() string { - var buf strings.Builder - i := 0 - buf.WriteString("disco.NewOAuthGrantTypeSet(") - for t := range s { - if i > 0 { - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%q", string(t)) - i++ - } - buf.WriteString(")") - return buf.String() -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/go.mod b/vendor/github.com/hashicorp/terraform-svchost/go.mod deleted file mode 100644 index 8f29e4ac..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/hashicorp/terraform-svchost - -go 1.12 - -require ( - github.com/google/go-cmp v0.3.1 - github.com/hashicorp/go-cleanhttp v0.5.1 - github.com/hashicorp/go-version v1.2.0 - github.com/zclconf/go-cty v1.1.0 - golang.org/x/net v0.0.0-20191009170851-d66e71096ffb - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 -) diff --git a/vendor/github.com/hashicorp/terraform-svchost/go.sum b/vendor/github.com/hashicorp/terraform-svchost/go.sum deleted file mode 100644 index 9ad1712f..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/go.sum +++ /dev/null @@ -1,36 +0,0 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go b/vendor/github.com/hashicorp/terraform-svchost/label_iter.go deleted file mode 100644 index af8ccbab..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go +++ /dev/null @@ -1,69 +0,0 @@ -package svchost - -import ( - "strings" -) - -// A labelIter allows iterating over domain name labels. -// -// This type is copied from golang.org/x/net/idna, where it is used -// to segment hostnames into their separate labels for analysis. We use -// it for the same purpose here, in ForComparison. 
-type labelIter struct { - orig string - slice []string - curStart int - curEnd int - i int -} - -func (l *labelIter) reset() { - l.curStart = 0 - l.curEnd = 0 - l.i = 0 -} - -func (l *labelIter) done() bool { - return l.curStart >= len(l.orig) -} - -func (l *labelIter) result() string { - if l.slice != nil { - return strings.Join(l.slice, ".") - } - return l.orig -} - -func (l *labelIter) label() string { - if l.slice != nil { - return l.slice[l.i] - } - p := strings.IndexByte(l.orig[l.curStart:], '.') - l.curEnd = l.curStart + p - if p == -1 { - l.curEnd = len(l.orig) - } - return l.orig[l.curStart:l.curEnd] -} - -// next sets the value to the next label. It skips the last label if it is empty. -func (l *labelIter) next() { - l.i++ - if l.slice != nil { - if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { - l.curStart = len(l.orig) - } - } else { - l.curStart = l.curEnd + 1 - if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { - l.curStart = len(l.orig) - } - } -} - -func (l *labelIter) set(s string) { - if l.slice == nil { - l.slice = strings.Split(l.orig, ".") - } - l.slice[l.i] = s -} diff --git a/vendor/github.com/hashicorp/terraform-svchost/svchost.go b/vendor/github.com/hashicorp/terraform-svchost/svchost.go deleted file mode 100644 index 4060b767..00000000 --- a/vendor/github.com/hashicorp/terraform-svchost/svchost.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package svchost deals with the representations of the so-called "friendly -// hostnames" that we use to represent systems that provide Terraform-native -// remote services, such as module registry, remote operations, etc. -// -// Friendly hostnames are specified such that, as much as possible, they -// are consistent with how web browsers think of hostnames, so that users -// can bring their intuitions about how hostnames behave when they access -// a Terraform Enterprise instance's web UI (or indeed any other website) -// and have this behave in a similar way. -package svchost - -import ( - "errors" - "fmt" - "strconv" - "strings" - - "golang.org/x/net/idna" -) - -// Hostname is specialized name for string that indicates that the string -// has been converted to (or was already in) the storage and comparison form. -// -// Hostname values are not suitable for display in the user-interface. Use -// the ForDisplay method to obtain a form suitable for display in the UI. -// -// Unlike user-supplied hostnames, strings of type Hostname (assuming they -// were constructed by a function within this package) can be compared for -// equality using the standard Go == operator. -type Hostname string - -// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that -// a domain name label is in "punycode" form. -const acePrefix = "xn--" - -// displayProfile is a very liberal idna profile that we use to do -// normalization for display without imposing validation rules. -var displayProfile = idna.New( - idna.MapForLookup(), - idna.Transitional(true), -) - -// ForDisplay takes a user-specified hostname and returns a normalized form of -// it suitable for display in the UI. -// -// If the input is so invalid that no normalization can be performed then -// this will return the input, assuming that the caller still wants to -// display _something_. This function is, however, more tolerant than the -// other functions in this package and will make a best effort to prepare -// _any_ given hostname for display. 
-// -// For validation, use either IsValid (for explicit validation) or -// ForComparison (which implicitly validates, returning an error if invalid). -func ForDisplay(given string) string { - var portPortion string - if colonPos := strings.Index(given, ":"); colonPos != -1 { - given, portPortion = given[:colonPos], given[colonPos:] - } - portPortion, _ = normalizePortPortion(portPortion) - - ascii, err := displayProfile.ToASCII(given) - if err != nil { - return given + portPortion - } - display, err := displayProfile.ToUnicode(ascii) - if err != nil { - return given + portPortion - } - return display + portPortion -} - -// IsValid returns true if the given user-specified hostname is a valid -// service hostname. -// -// Validity is determined by complying with the RFC 5891 requirements for -// names that are valid for domain lookup (section 5), with the additional -// requirement that user-supplied forms must not _already_ contain -// Punycode segments. -func IsValid(given string) bool { - _, err := ForComparison(given) - return err == nil -} - -// ForComparison takes a user-specified hostname and returns a normalized -// form of it suitable for storage and comparison. The result is not suitable -// for display to end-users because it uses Punycode to represent non-ASCII -// characters, and this form is unreadable for non-ASCII-speaking humans. -// -// The result is typed as Hostname -- a specialized name for string -- so that -// other APIs can make it clear within the type system whether they expect a -// user-specified or display-form hostname or a value already normalized for -// comparison. -// -// The returned Hostname is not valid if the returned error is non-nil. -func ForComparison(given string) (Hostname, error) { - var portPortion string - if colonPos := strings.Index(given, ":"); colonPos != -1 { - given, portPortion = given[:colonPos], given[colonPos:] - } - - var err error - portPortion, err = normalizePortPortion(portPortion) - if err != nil { - return Hostname(""), err - } - - if given == "" { - return Hostname(""), fmt.Errorf("empty string is not a valid hostname") - } - - // First we'll apply our additional constraint that Punycode must not - // be given directly by the user. This is not an IDN specification - // requirement, but we prohibit it to force users to use human-readable - // hostname forms within Terraform configuration. - labels := labelIter{orig: given} - for ; !labels.done(); labels.next() { - label := labels.label() - if label == "" { - return Hostname(""), fmt.Errorf( - "hostname contains empty label (two consecutive periods)", - ) - } - if strings.HasPrefix(label, acePrefix) { - return Hostname(""), fmt.Errorf( - "hostname label %q specified in punycode format; service hostnames must be given in unicode", - label, - ) - } - } - - result, err := idna.Lookup.ToASCII(given) - if err != nil { - return Hostname(""), err - } - return Hostname(result + portPortion), nil -} - -// ForDisplay returns a version of the receiver that is appropriate for display -// in the UI. This includes converting any punycode labels to their -// corresponding Unicode characters. -// -// A round-trip through ForComparison and this ForDisplay method does not -// guarantee the same result as calling this package's top-level ForDisplay -// function, since a round-trip through the Hostname type implies stricter -// handling than we do when doing basic display-only processing. 
-func (h Hostname) ForDisplay() string { - given := string(h) - var portPortion string - if colonPos := strings.Index(given, ":"); colonPos != -1 { - given, portPortion = given[:colonPos], given[colonPos:] - } - // We don't normalize the port portion here because we assume it's - // already been normalized on the way in. - - result, err := idna.Lookup.ToUnicode(given) - if err != nil { - // Should never happen, since type Hostname indicates that a string - // passed through our validation rules. - panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) - } - return result + portPortion -} - -func (h Hostname) String() string { - return string(h) -} - -func (h Hostname) GoString() string { - return fmt.Sprintf("svchost.Hostname(%q)", string(h)) -} - -// normalizePortPortion attempts to normalize the "port portion" of a hostname, -// which begins with the first colon in the hostname and should be followed -// by a string of decimal digits. -// -// If the port portion is valid, a normalized version of it is returned along -// with a nil error. -// -// If the port portion is invalid, the input string is returned verbatim along -// with a non-nil error. -// -// An empty string is a valid port portion representing the absence of a port. -// If non-empty, the first character must be a colon. -func normalizePortPortion(s string) (string, error) { - if s == "" { - return s, nil - } - - if s[0] != ':' { - // should never happen, since caller tends to guarantee the presence - // of a colon due to how it's extracted from the string. - return s, errors.New("port portion is missing its initial colon") - } - - numStr := s[1:] - num, err := strconv.Atoi(numStr) - if err != nil { - return s, errors.New("port portion contains non-digit characters") - } - if num == 443 { - return "", nil // ":443" is the default - } - if num > 65535 { - return s, errors.New("port number is greater than 65535") - } - return fmt.Sprintf(":%d", num), nil -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go deleted file mode 100644 index 90a5faf0..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// CountAttr is the address of an attribute of the "count" object in -// the interpolation scope, like "count.index". -type CountAttr struct { - referenceable - Name string -} - -func (ca CountAttr) String() string { - return "count." + ca.Name -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go deleted file mode 100644 index 7a638503..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// ForEachAttr is the address of an attribute referencing the current "for_each" object in -// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value" -type ForEachAttr struct { - referenceable - Name string -} - -func (f ForEachAttr) String() string { - return "each." + f.Name -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go deleted file mode 100644 index 79829f96..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go +++ /dev/null @@ -1,41 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// InputVariable is the address of an input variable. 
-type InputVariable struct { - referenceable - Name string -} - -func (v InputVariable) String() string { - return "var." + v.Name -} - -// AbsInputVariableInstance is the address of an input variable within a -// particular module instance. -type AbsInputVariableInstance struct { - Module ModuleInstance - Variable InputVariable -} - -// InputVariable returns the absolute address of the input variable of the -// given name inside the receiving module instance. -func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { - return AbsInputVariableInstance{ - Module: m, - Variable: InputVariable{ - Name: name, - }, - } -} - -func (v AbsInputVariableInstance) String() string { - if len(v.Module) == 0 { - return v.Variable.String() - } - - return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go deleted file mode 100644 index ff128be5..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go +++ /dev/null @@ -1,135 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// InstanceKey represents the key of an instance within an object that -// contains multiple instances due to using "count" or "for_each" arguments -// in configuration. -// -// IntKey and StringKey are the two implementations of this type. No other -// implementations are allowed. The single instance of an object that _isn't_ -// using "count" or "for_each" is represented by NoKey, which is a nil -// InstanceKey. -type InstanceKey interface { - instanceKeySigil() - String() string - - // Value returns the cty.Value of the appropriate type for the InstanceKey - // value. - Value() cty.Value -} - -// ParseInstanceKey returns the instance key corresponding to the given value, -// which must be known and non-null. -// -// If an unknown or null value is provided then this function will panic. This -// function is intended to deal with the values that would naturally be found -// in a hcl.TraverseIndex, which (when parsed from source, at least) can never -// contain unknown or null values. -func ParseInstanceKey(key cty.Value) (InstanceKey, error) { - switch key.Type() { - case cty.String: - return StringKey(key.AsString()), nil - case cty.Number: - var idx int - err := gocty.FromCtyValue(key, &idx) - return IntKey(idx), err - default: - return NoKey, fmt.Errorf("either a string or an integer is required") - } -} - -// NoKey represents the absense of an InstanceKey, for the single instance -// of a configuration object that does not use "count" or "for_each" at all. -var NoKey InstanceKey - -// IntKey is the InstanceKey representation representing integer indices, as -// used when the "count" argument is specified or if for_each is used with -// a sequence type. -type IntKey int - -func (k IntKey) instanceKeySigil() { -} - -func (k IntKey) String() string { - return fmt.Sprintf("[%d]", int(k)) -} - -func (k IntKey) Value() cty.Value { - return cty.NumberIntVal(int64(k)) -} - -// StringKey is the InstanceKey representation representing string indices, as -// used when the "for_each" argument is specified with a map or object type. -type StringKey string - -func (k StringKey) instanceKeySigil() { -} - -func (k StringKey) String() string { - // FIXME: This isn't _quite_ right because Go's quoted string syntax is - // slightly different than HCL's, but we'll accept it for now. 
- return fmt.Sprintf("[%q]", string(k)) -} - -func (k StringKey) Value() cty.Value { - return cty.StringVal(string(k)) -} - -// InstanceKeyLess returns true if the first given instance key i should sort -// before the second key j, and false otherwise. -func InstanceKeyLess(i, j InstanceKey) bool { - iTy := instanceKeyType(i) - jTy := instanceKeyType(j) - - switch { - case i == j: - return false - case i == NoKey: - return true - case j == NoKey: - return false - case iTy != jTy: - // The ordering here is arbitrary except that we want NoKeyType - // to sort before the others, so we'll just use the enum values - // of InstanceKeyType here (where NoKey is zero, sorting before - // any other). - return uint32(iTy) < uint32(jTy) - case iTy == IntKeyType: - return int(i.(IntKey)) < int(j.(IntKey)) - case iTy == StringKeyType: - return string(i.(StringKey)) < string(j.(StringKey)) - default: - // Shouldn't be possible to get down here in practice, since the - // above is exhaustive. - return false - } -} - -func instanceKeyType(k InstanceKey) InstanceKeyType { - if _, ok := k.(StringKey); ok { - return StringKeyType - } - if _, ok := k.(IntKey); ok { - return IntKeyType - } - return NoKeyType -} - -// InstanceKeyType represents the different types of instance key that are -// supported. Usually it is sufficient to simply type-assert an InstanceKey -// value to either IntKey or StringKey, but this type and its values can be -// used to represent the types themselves, rather than specific values -// of those types. -type InstanceKeyType rune - -const ( - NoKeyType InstanceKeyType = 0 - IntKeyType InstanceKeyType = 'I' - StringKeyType InstanceKeyType = 'S' -) diff --git a/vendor/github.com/hashicorp/terraform/addrs/local_value.go b/vendor/github.com/hashicorp/terraform/addrs/local_value.go deleted file mode 100644 index 61a07b9c..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/local_value.go +++ /dev/null @@ -1,48 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// LocalValue is the address of a local value. -type LocalValue struct { - referenceable - Name string -} - -func (v LocalValue) String() string { - return "local." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: v, - } -} - -// AbsLocalValue is the absolute address of a local value within a module instance. -type AbsLocalValue struct { - Module ModuleInstance - LocalValue LocalValue -} - -// LocalValue returns the absolute address of a local value of the given -// name within the receiving module instance. -func (m ModuleInstance) LocalValue(name string) AbsLocalValue { - return AbsLocalValue{ - Module: m, - LocalValue: LocalValue{ - Name: name, - }, - } -} - -func (v AbsLocalValue) String() string { - if len(v.Module) == 0 { - return v.LocalValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/module.go b/vendor/github.com/hashicorp/terraform/addrs/module.go deleted file mode 100644 index 6420c630..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/module.go +++ /dev/null @@ -1,75 +0,0 @@ -package addrs - -import ( - "strings" -) - -// Module is an address for a module call within configuration. 
This is -// the static counterpart of ModuleInstance, representing a traversal through -// the static module call tree in configuration and does not take into account -// the potentially-multiple instances of a module that might be created by -// "count" and "for_each" arguments within those calls. -// -// This type should be used only in very specialized cases when working with -// the static module call tree. Type ModuleInstance is appropriate in more cases. -// -// Although Module is a slice, it should be treated as immutable after creation. -type Module []string - -// RootModule is the module address representing the root of the static module -// call tree, which is also the zero value of Module. -// -// Note that this is not the root of the dynamic module tree, which is instead -// represented by RootModuleInstance. -var RootModule Module - -// IsRoot returns true if the receiver is the address of the root module, -// or false otherwise. -func (m Module) IsRoot() bool { - return len(m) == 0 -} - -func (m Module) String() string { - if len(m) == 0 { - return "" - } - return strings.Join([]string(m), ".") -} - -// Child returns the address of a child call in the receiver, identified by the -// given name. -func (m Module) Child(name string) Module { - ret := make(Module, 0, len(m)+1) - ret = append(ret, m...) - return append(ret, name) -} - -// Parent returns the address of the parent module of the receiver, or the -// receiver itself if there is no parent (if it's the root module address). -func (m Module) Parent() Module { - if len(m) == 0 { - return m - } - return m[:len(m)-1] -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiever that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referencable values. -func (m Module) Call() (Module, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - caller, callName := m[:len(m)-1], m[len(m)-1] - return caller, ModuleCall{ - Name: callName, - } -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_call.go b/vendor/github.com/hashicorp/terraform/addrs/module_call.go deleted file mode 100644 index 09596cc8..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/module_call.go +++ /dev/null @@ -1,81 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// ModuleCall is the address of a call from the current module to a child -// module. -// -// There is no "Abs" version of ModuleCall because an absolute module path -// is represented by ModuleInstance. -type ModuleCall struct { - referenceable - Name string -} - -func (c ModuleCall) String() string { - return "module." + c.Name -} - -// Instance returns the address of an instance of the receiver identified by -// the given key. -func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance { - return ModuleCallInstance{ - Call: c, - Key: key, - } -} - -// ModuleCallInstance is the address of one instance of a module created from -// a module call, which might create multiple instances using "count" or -// "for_each" arguments. 
-type ModuleCallInstance struct { - referenceable - Call ModuleCall - Key InstanceKey -} - -func (c ModuleCallInstance) String() string { - if c.Key == NoKey { - return c.Call.String() - } - return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) -} - -// ModuleInstance returns the address of the module instance that corresponds -// to the receiving call instance when resolved in the given calling module. -// In other words, it returns the child module instance that the receving -// call instance creates. -func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { - return caller.Child(c.Call.Name, c.Key) -} - -// Output returns the address of an output of the receiver identified by its -// name. -func (c ModuleCallInstance) Output(name string) ModuleCallOutput { - return ModuleCallOutput{ - Call: c, - Name: name, - } -} - -// ModuleCallOutput is the address of a particular named output produced by -// an instance of a module call. -type ModuleCallOutput struct { - referenceable - Call ModuleCallInstance - Name string -} - -func (co ModuleCallOutput) String() string { - return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) -} - -// AbsOutputValue returns the absolute output value address that corresponds -// to the receving module call output address, once resolved in the given -// calling module. -func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { - moduleAddr := co.Call.ModuleInstance(caller) - return moduleAddr.OutputValue(co.Name) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go deleted file mode 100644 index d26aee27..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go +++ /dev/null @@ -1,415 +0,0 @@ -package addrs - -import ( - "bytes" - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" - - "github.com/hashicorp/terraform/tfdiags" -) - -// ModuleInstance is an address for a particular module instance within the -// dynamic module tree. This is an extension of the static traversals -// represented by type Module that deals with the possibility of a single -// module call producing multiple instances via the "count" and "for_each" -// arguments. -// -// Although ModuleInstance is a slice, it should be treated as immutable after -// creation. -type ModuleInstance []ModuleInstanceStep - -var ( - _ Targetable = ModuleInstance(nil) -) - -func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { - mi, remain, diags := parseModuleInstancePrefix(traversal) - if len(remain) != 0 { - if len(remain) == len(traversal) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "A module instance address must begin with \"module.\".", - Subject: remain.SourceRange().Ptr(), - }) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance address", - Detail: "The module instance address is followed by additional invalid content.", - Subject: remain.SourceRange().Ptr(), - }) - } - } - return mi, diags -} - -// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. 
-// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseModuleInstance. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. -func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - addr, addrDiags := ParseModuleInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { - remain := traversal - var mi ModuleInstance - var diags tfdiags.Diagnostics - - for len(remain) > 0 { - var next string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - next = tt.Name - case hcl.TraverseAttr: - next = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Module address prefix must be followed by dot and then a name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - - if next != "module" { - break - } - - kwRange := remain[0].SourceRange() - remain = remain[1:] - // If we have the prefix "module" then we should be followed by an - // module call name, as an attribute, and then optionally an index step - // giving the instance key. - if len(remain) == 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: &kwRange, - }) - break - } - - var moduleName string - switch tt := remain[0].(type) { - case hcl.TraverseAttr: - moduleName = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Prefix \"module.\" must be followed by a module name.", - Subject: remain[0].SourceRange().Ptr(), - }) - break - } - remain = remain[1:] - step := ModuleInstanceStep{ - Name: moduleName, - } - - if len(remain) > 0 { - if idx, ok := remain[0].(hcl.TraverseIndex); ok { - remain = remain[1:] - - switch idx.Key.Type() { - case cty.String: - step.InstanceKey = StringKey(idx.Key.AsString()) - case cty.Number: - var idxInt int - err := gocty.FromCtyValue(idx.Key, &idxInt) - if err == nil { - step.InstanceKey = IntKey(idxInt) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: fmt.Sprintf("Invalid module index: %s.", err), - Subject: idx.SourceRange().Ptr(), - }) - } - default: - // Should never happen, because no other types are allowed in traversal indices. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address operator", - Detail: "Invalid module key: must be either a string or an integer.", - Subject: idx.SourceRange().Ptr(), - }) - } - } - } - - mi = append(mi, step) - } - - var retRemain hcl.Traversal - if len(remain) > 0 { - retRemain = make(hcl.Traversal, len(remain)) - copy(retRemain, remain) - // The first element here might be either a TraverseRoot or a - // TraverseAttr, depending on whether we had a module address on the - // front. To make life easier for callers, we'll normalize to always - // start with a TraverseRoot. - if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { - retRemain[0] = hcl.TraverseRoot{ - Name: tt.Name, - SrcRange: tt.SrcRange, - } - } - } - - return mi, retRemain, diags -} - -// UnkeyedInstanceShim is a shim method for converting a Module address to the -// equivalent ModuleInstance address that assumes that no modules have -// keyed instances. -// -// This is a temporary allowance for the fact that Terraform does not presently -// support "count" and "for_each" on modules, and thus graph building code that -// derives graph nodes from configuration must just assume unkeyed modules -// in order to construct the graph. At a later time when "count" and "for_each" -// support is added for modules, all callers of this method will need to be -// reworked to allow for keyed module instances. -func (m Module) UnkeyedInstanceShim() ModuleInstance { - path := make(ModuleInstance, len(m)) - for i, name := range m { - path[i] = ModuleInstanceStep{Name: name} - } - return path -} - -// ModuleInstanceStep is a single traversal step through the dynamic module -// tree. It is used only as part of ModuleInstance. -type ModuleInstanceStep struct { - Name string - InstanceKey InstanceKey -} - -// RootModuleInstance is the module instance address representing the root -// module, which is also the zero value of ModuleInstance. -var RootModuleInstance ModuleInstance - -// IsRoot returns true if the receiver is the address of the root module instance, -// or false otherwise. -func (m ModuleInstance) IsRoot() bool { - return len(m) == 0 -} - -// Child returns the address of a child module instance of the receiver, -// identified by the given name and key. -func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { - ret := make(ModuleInstance, 0, len(m)+1) - ret = append(ret, m...) - return append(ret, ModuleInstanceStep{ - Name: name, - InstanceKey: key, - }) -} - -// Parent returns the address of the parent module instance of the receiver, or -// the receiver itself if there is no parent (if it's the root module address). -func (m ModuleInstance) Parent() ModuleInstance { - if len(m) == 0 { - return m - } - return m[:len(m)-1] -} - -// String returns a string representation of the receiver, in the format used -// within e.g. user-provided resource addresses. -// -// The address of the root module has the empty string as its representation. -func (m ModuleInstance) String() string { - var buf bytes.Buffer - sep := "" - for _, step := range m { - buf.WriteString(sep) - buf.WriteString("module.") - buf.WriteString(step.Name) - if step.InstanceKey != NoKey { - buf.WriteString(step.InstanceKey.String()) - } - sep = "." - } - return buf.String() -} - -// Equal returns true if the receiver and the given other value -// contains the exact same parts. 
-func (m ModuleInstance) Equal(o ModuleInstance) bool { - return m.String() == o.String() -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. -func (m ModuleInstance) Less(o ModuleInstance) bool { - if len(m) != len(o) { - // Shorter path sorts first. - return len(m) < len(o) - } - - for i := range m { - mS, oS := m[i], o[i] - switch { - case mS.Name != oS.Name: - return mS.Name < oS.Name - case mS.InstanceKey != oS.InstanceKey: - return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey) - } - } - - return false -} - -// Ancestors returns a slice containing the receiver and all of its ancestor -// module instances, all the way up to (and including) the root module. -// The result is ordered by depth, with the root module always first. -// -// Since the result always includes the root module, a caller may choose to -// ignore it by slicing the result with [1:]. -func (m ModuleInstance) Ancestors() []ModuleInstance { - ret := make([]ModuleInstance, 0, len(m)+1) - for i := 0; i <= len(m); i++ { - ret = append(ret, m[:i]) - } - return ret -} - -// IsAncestor returns true if the receiver is an ancestor of the given -// other value. -func (m ModuleInstance) IsAncestor(o ModuleInstance) bool { - // Longer or equal sized paths means the receiver cannot - // be an ancestor of the given module insatnce. - if len(m) >= len(o) { - return false - } - - for i, ms := range m { - if ms.Name != o[i].Name { - return false - } - if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey { - return false - } - } - - return true -} - -// Call returns the module call address that corresponds to the given module -// instance, along with the address of the module instance that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// A single module call can produce potentially many module instances, so the -// result discards any instance key that might be present on the last step -// of the instance. To retain this, use CallInstance instead. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCall and then returns a slice of the receiever that excludes that -// last part. This is just a convenience for situations where a call address -// is required, such as when dealing with *Reference and Referencable values. -func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) { - if len(m) == 0 { - panic("cannot produce ModuleCall for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCall{ - Name: lastStep.Name, - } -} - -// CallInstance returns the module call instance address that corresponds to -// the given module instance, along with the address of the module instance -// that contains it. -// -// There is no call for the root module, so this method will panic if called -// on the root module address. -// -// In practice, this just turns the last element of the receiver into a -// ModuleCallInstance and then returns a slice of the receiever that excludes -// that last part. This is just a convenience for situations where a call\ -// address is required, such as when dealing with *Reference and Referencable -// values. 
-func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { - if len(m) == 0 { - panic("cannot produce ModuleCallInstance for root module") - } - - inst, lastStep := m[:len(m)-1], m[len(m)-1] - return inst, ModuleCallInstance{ - Call: ModuleCall{ - Name: lastStep.Name, - }, - Key: lastStep.InstanceKey, - } -} - -// TargetContains implements Targetable by returning true if the given other -// address either matches the receiver, is a sub-module-instance of the -// receiver, or is a targetable absolute address within a module that -// is contained within the reciever. -func (m ModuleInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case ModuleInstance: - if len(to) < len(m) { - // Can't be contained if the path is shorter - return false - } - // Other is contained if its steps match for the length of our own path. - for i, ourStep := range m { - otherStep := to[i] - if ourStep != otherStep { - return false - } - } - // If we fall out here then the prefixed matched, so it's contained. - return true - - case AbsResource: - return m.TargetContains(to.Module) - - case AbsResourceInstance: - return m.TargetContains(to.Module) - - default: - return false - } -} - -func (m ModuleInstance) targetableSigil() { - // ModuleInstance is targetable -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/output_value.go b/vendor/github.com/hashicorp/terraform/addrs/output_value.go deleted file mode 100644 index bcd923ac..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/output_value.go +++ /dev/null @@ -1,75 +0,0 @@ -package addrs - -import ( - "fmt" -) - -// OutputValue is the address of an output value, in the context of the module -// that is defining it. -// -// This is related to but separate from ModuleCallOutput, which represents -// a module output from the perspective of its parent module. Since output -// values cannot be represented from the module where they are defined, -// OutputValue is not Referenceable, while ModuleCallOutput is. -type OutputValue struct { - Name string -} - -func (v OutputValue) String() string { - return "output." + v.Name -} - -// Absolute converts the receiver into an absolute address within the given -// module instance. -func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: v, - } -} - -// AbsOutputValue is the absolute address of an output value within a module instance. -// -// This represents an output globally within the namespace of a particular -// configuration. It is related to but separate from ModuleCallOutput, which -// represents a module output from the perspective of its parent module. -type AbsOutputValue struct { - Module ModuleInstance - OutputValue OutputValue -} - -// OutputValue returns the absolute address of an output value of the given -// name within the receiving module instance. -func (m ModuleInstance) OutputValue(name string) AbsOutputValue { - return AbsOutputValue{ - Module: m, - OutputValue: OutputValue{ - Name: name, - }, - } -} - -func (v AbsOutputValue) String() string { - if v.Module.IsRoot() { - return v.OutputValue.String() - } - return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) -} - -// ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput, -// returning also the module instance that the ModuleCallOutput is relative -// to. -// -// The root module does not have a call, and so this method cannot be used -// with outputs in the root module, and will panic in that case. 
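The two output-value address forms removed here render as shown in this sketch; the module and output names are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	mod := addrs.RootModuleInstance.Child("network", addrs.NoKey)

	fmt.Println(mod.OutputValue("vpc_id"))                      // module.network.output.vpc_id
	fmt.Println(addrs.RootModuleInstance.OutputValue("vpc_id")) // output.vpc_id
}
```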
-func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) { - if v.Module.IsRoot() { - panic("ReferenceFromCall used with root module output") - } - - caller, call := v.Module.CallInstance() - return caller, ModuleCallOutput{ - Call: call, - Name: v.OutputValue.Name, - } -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go deleted file mode 100644 index d5142690..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go +++ /dev/null @@ -1,346 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/tfdiags" -) - -// Reference describes a reference to an address with source location -// information. -type Reference struct { - Subject Referenceable - SourceRange tfdiags.SourceRange - Remaining hcl.Traversal -} - -// ParseRef attempts to extract a referencable address from the prefix of the -// given traversal, which must be an absolute traversal or this function -// will panic. -// -// If no error diagnostics are returned, the returned reference includes the -// address that was extracted, the source range it was extracted from, and any -// remaining relative traversal that was not consumed as part of the -// reference. -// -// If error diagnostics are returned then the Reference value is invalid and -// must not be used. -func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - ref, diags := parseRef(traversal) - - // Normalize a little to make life easier for callers. - if ref != nil { - if len(ref.Remaining) == 0 { - ref.Remaining = nil - } - } - - return ref, diags -} - -// ParseRefStr is a helper wrapper around ParseRef that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseRef. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned reference may be nil or incomplete. 
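A hedged usage sketch of ParseRefStr from the deleted file above; the reference string is made up, and as the doc comment notes, real callers should prefer ParseRef with a properly sourced traversal.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	ref, diags := addrs.ParseRefStr("aws_instance.web[0].id")
	if diags.HasErrors() {
		log.Fatal("failed to parse reference")
	}

	fmt.Println(ref.Subject)             // aws_instance.web[0] (a ResourceInstance address)
	fmt.Println(len(ref.Remaining) == 1) // true: the trailing .id attribute is left for the caller
}
```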
-func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - ref, targetDiags := ParseRef(traversal) - diags = diags.Append(targetDiags) - return ref, diags -} - -func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - switch root { - - case "count": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: CountAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "each": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: ForEachAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "data": - if len(traversal) < 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, - Subject: traversal.SourceRange().Ptr(), - }) - return nil, diags - } - remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser - return parseResourceRef(DataResourceMode, rootRange, remain) - - case "local": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: LocalValue{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "module": - callName, callRange, remain, diags := parseSingleAttrRef(traversal) - if diags.HasErrors() { - return nil, diags - } - - // A traversal starting with "module" can either be a reference to - // an entire module instance or to a single output from a module - // instance, depending on what we find after this introducer. - - callInstance := ModuleCallInstance{ - Call: ModuleCall{ - Name: callName, - }, - Key: NoKey, - } - - if len(remain) == 0 { - // Reference to an entire module instance. Might alternatively - // be a reference to a collection of instances of a particular - // module, but the caller will need to deal with that ambiguity - // since we don't have enough context here. - return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(callRange), - Remaining: remain, - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - callInstance.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - - if len(remain) == 0 { - // Also a reference to an entire module instance, but we have a key - // now. 
- return &Reference{ - Subject: callInstance, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), - Remaining: remain, - }, diags - } - } - - if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { - remain = remain[1:] - return &Reference{ - Subject: ModuleCallOutput{ - Name: attrTrav.Name, - Call: callInstance, - }, - SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), - Remaining: remain, - }, diags - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: "Module instance objects do not support this operation.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - - case "path": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: PathAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "self": - return &Reference{ - Subject: Self, - SourceRange: tfdiags.SourceRangeFromHCL(rootRange), - Remaining: traversal[1:], - }, diags - - case "terraform": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: TerraformAttr{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - case "var": - name, rng, remain, diags := parseSingleAttrRef(traversal) - return &Reference{ - Subject: InputVariable{Name: name}, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags - - default: - return parseResourceRef(ManagedResourceMode, rootRange, traversal) - } -} - -func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, - Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - // If it isn't a TraverseRoot then it must be a "data" reference. 
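Each branch of the switch in parseRef maps a root name to a different Referenceable type; an illustrative sketch with hypothetical reference strings:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	for _, s := range []string{
		"var.region",            // addrs.InputVariable
		"local.name_prefix",     // addrs.LocalValue
		"count.index",           // addrs.CountAttr
		"path.module",           // addrs.PathAttr
		"module.network.vpc_id", // addrs.ModuleCallOutput
		"data.aws_ami.ubuntu",   // addrs.Resource (data mode)
	} {
		ref, diags := addrs.ParseRefStr(s)
		if diags.HasErrors() {
			continue
		}
		fmt.Printf("%-24s -> %T\n", s, ref.Subject)
	}
}
```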
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: `The "data" object does not support this operation.`, - Subject: traversal[0].SourceRange().Ptr(), - }) - return nil, diags - } - - attrTrav, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - var what string - switch mode { - case DataResourceMode: - what = "data source" - default: - what = "resource type" - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), - Subject: traversal[1].SourceRange().Ptr(), - }) - return nil, diags - } - name = attrTrav.Name - rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) - remain := traversal[2:] - - resourceAddr := Resource{ - Mode: mode, - Type: typeName, - Name: name, - } - resourceInstAddr := ResourceInstance{ - Resource: resourceAddr, - Key: NoKey, - } - - if len(remain) == 0 { - // This might actually be a reference to the collection of all instances - // of the resource, but we don't have enough context here to decide - // so we'll let the caller resolve that ambiguity. - return &Reference{ - Subject: resourceAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - }, diags - } - - if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { - var err error - resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid index key", - Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), - Subject: &idxTrav.SrcRange, - }) - return nil, diags - } - remain = remain[1:] - rng = hcl.RangeBetween(rng, idxTrav.SrcRange) - } - - return &Reference{ - Subject: resourceInstAddr, - SourceRange: tfdiags.SourceRangeFromHCL(rng), - Remaining: remain, - }, diags -} - -func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - root := traversal.RootName() - rootRange := traversal[0].SourceRange() - - if len(traversal) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root), - Subject: &rootRange, - }) - return "", hcl.Range{}, nil, diags - } - if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { - return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference", - Detail: fmt.Sprintf("The %q object does not support this operation.", root), - Subject: traversal[1].SourceRange().Ptr(), - }) - return "", hcl.Range{}, nil, diags -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go deleted file mode 100644 index c308525f..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go +++ /dev/null @@ -1,318 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/tfdiags" -) - -// Target describes a targeted address with source location information. 
-type Target struct { - Subject Targetable - SourceRange tfdiags.SourceRange -} - -// ParseTarget attempts to interpret the given traversal as a targetable -// address. The given traversal must be absolute, or this function will -// panic. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the Target value is invalid and -// must not be used. -func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { - path, remain, diags := parseModuleInstancePrefix(traversal) - if diags.HasErrors() { - return nil, diags - } - - rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) - - if len(remain) == 0 { - return &Target{ - Subject: path, - SourceRange: rng, - }, diags - } - - mode := ManagedResourceMode - if remain.RootName() == "data" { - mode = DataResourceMode - remain = remain[1:] - } - - if len(remain) < 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource specification must include a resource type and name.", - Subject: remain.SourceRange().Ptr(), - }) - return nil, diags - } - - var typeName, name string - switch tt := remain[0].(type) { - case hcl.TraverseRoot: - typeName = tt.Name - case hcl.TraverseAttr: - typeName = tt.Name - default: - switch mode { - case ManagedResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource type name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - case DataResourceMode: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A data source name is required.", - Subject: remain[0].SourceRange().Ptr(), - }) - default: - panic("unknown mode") - } - return nil, diags - } - - switch tt := remain[1].(type) { - case hcl.TraverseAttr: - name = tt.Name - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource name is required.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - var subject Targetable - remain = remain[2:] - switch len(remain) { - case 0: - subject = path.Resource(mode, typeName, name) - case 1: - if tt, ok := remain[0].(hcl.TraverseIndex); ok { - key, err := ParseInstanceKey(tt.Key) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - - subject = path.ResourceInstance(mode, typeName, name, key) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Resource instance key must be given in square brackets.", - Subject: remain[0].SourceRange().Ptr(), - }) - return nil, diags - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "Unexpected extra operators after address.", - Subject: remain[1].SourceRange().Ptr(), - }) - return nil, diags - } - - return &Target{ - Subject: subject, - SourceRange: rng, - }, diags -} - -// ParseTargetStr is a helper wrapper around ParseTarget that takes a string -// and parses it with the HCL native syntax traversal parser before -// interpreting it. 
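ParseTarget accepts module, resource, and resource-instance addresses; a sketch using the ParseTargetStr helper defined just below, with made-up addresses:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	for _, s := range []string{
		"module.network",              // addrs.ModuleInstance
		"aws_instance.web",            // addrs.AbsResource
		"aws_instance.web[0]",         // addrs.AbsResourceInstance
		"data.aws_ami.ubuntu",         // addrs.AbsResource (data mode)
		"module.network.aws_vpc.main", // addrs.AbsResource inside a child module
	} {
		target, diags := addrs.ParseTargetStr(s)
		if diags.HasErrors() {
			continue
		}
		fmt.Printf("%-30s -> %T\n", s, target.Subject)
	}
}
```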
-// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a target string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseTarget. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned target may be nil or incomplete. -func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return nil, diags - } - - target, targetDiags := ParseTarget(traversal) - diags = diags.Append(targetDiags) - return target, diags -} - -// ParseAbsResource attempts to interpret the given traversal as an absolute -// resource address, using the same syntax as expected by ParseTarget. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the AbsResource value is invalid and -// must not be used. -func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) { - addr, diags := ParseTarget(traversal) - if diags.HasErrors() { - return AbsResource{}, diags - } - - switch tt := addr.Subject.(type) { - - case AbsResource: - return tt, diags - - case AbsResourceInstance: // Catch likely user error with specialized message - // Assume that the last element of the traversal must be the index, - // since that's required for a valid resource instance address. - indexStep := traversal[len(traversal)-1] - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.", - Subject: indexStep.SourceRange().Ptr(), - }) - return AbsResource{}, diags - - case ModuleInstance: // Catch likely user error with specialized message - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here. The module path must be followed by a resource specification.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResource{}, diags - - default: // Generic message for other address types - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResource{}, diags - - } -} - -// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a -// string and parses it with the HCL native syntax traversal parser before -// interpreting it. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address may be incomplete. 
-// -// Since this function has no context about the source of the given string, -// any returned diagnostics will not have meaningful source location -// information. -func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsResource{}, diags - } - - addr, addrDiags := ParseAbsResource(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// ParseAbsResourceInstance attempts to interpret the given traversal as an -// absolute resource instance address, using the same syntax as expected by -// ParseTarget. -// -// If no error diagnostics are returned, the returned target includes the -// address that was extracted and the source range it was extracted from. -// -// If error diagnostics are returned then the AbsResource value is invalid and -// must not be used. -func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { - addr, diags := ParseTarget(traversal) - if diags.HasErrors() { - return AbsResourceInstance{}, diags - } - - switch tt := addr.Subject.(type) { - - case AbsResource: - return tt.Instance(NoKey), diags - - case AbsResourceInstance: - return tt, diags - - case ModuleInstance: // Catch likely user error with specialized message - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource instance address is required here. The module path must be followed by a resource instance specification.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - default: // Generic message for other address types - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid address", - Detail: "A resource address is required here.", - Subject: traversal.SourceRange().Ptr(), - }) - return AbsResourceInstance{}, diags - - } -} - -// ParseAbsResourceInstanceStr is a helper wrapper around -// ParseAbsResourceInstance that takes a string and parses it with the HCL -// native syntax traversal parser before interpreting it. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address may be incomplete. -// -// Since this function has no context about the source of the given string, -// any returned diagnostics will not have meaningful source location -// information. 
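The practical difference between the two string helpers, sketched with hypothetical addresses: the instance parser tolerates an unkeyed address, while the resource parser rejects a keyed one.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	// ParseAbsResourceInstanceStr accepts an unkeyed address and fills in NoKey.
	inst, diags := addrs.ParseAbsResourceInstanceStr("module.network.aws_vpc.main")
	if !diags.HasErrors() {
		fmt.Println(inst) // module.network.aws_vpc.main
	}

	// ParseAbsResourceStr rejects an address that names a specific instance.
	_, diags = addrs.ParseAbsResourceStr("aws_instance.web[0]")
	fmt.Println(diags.HasErrors()) // true: an instance key is not allowed here
}
```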
-func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsResourceInstance{}, diags - } - - addr, addrDiags := ParseAbsResourceInstance(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go deleted file mode 100644 index cfc13f4b..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// PathAttr is the address of an attribute of the "path" object in -// the interpolation scope, like "path.module". -type PathAttr struct { - referenceable - Name string -} - -func (pa PathAttr) String() string { - return "path." + pa.Name -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider.go b/vendor/github.com/hashicorp/terraform/addrs/provider.go deleted file mode 100644 index 1a9b4cd4..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/provider.go +++ /dev/null @@ -1,230 +0,0 @@ -package addrs - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform/tfdiags" - "golang.org/x/net/idna" -) - -// Provider encapsulates a single provider type. In the future this will be -// extended to include additional fields including Namespace and SourceHost -type Provider struct { - Type string - Namespace string - Hostname svchost.Hostname -} - -// DefaultRegistryHost is the hostname used for provider addresses that do -// not have an explicit hostname. -const DefaultRegistryHost = svchost.Hostname("registry.terraform.io") - -// LegacyProviderNamespace is the special string used in the Namespace field -// of type Provider to mark a legacy provider address. This special namespace -// value would normally be invalid, and can be used only when the hostname is -// DefaultRegistryHost because that host owns the mapping from legacy name to -// FQN. -const LegacyProviderNamespace = "-" - -// String returns an FQN string, indended for use in output. -func (pt Provider) String() string { - return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type -} - -// NewDefaultProvider returns the default address of a HashiCorp-maintained, -// Registry-hosted provider. -func NewDefaultProvider(name string) Provider { - return Provider{ - Type: name, - Namespace: "hashicorp", - Hostname: "registry.terraform.io", - } -} - -// NewLegacyProvider returns a mock address for a provider. -// This will be removed when ProviderType is fully integrated. -func NewLegacyProvider(name string) Provider { - return Provider{ - Type: name, - Namespace: LegacyProviderNamespace, - Hostname: DefaultRegistryHost, - } -} - -// LegacyString returns the provider type, which is frequently used -// interchangeably with provider name. This function can and should be removed -// when provider type is fully integrated. As a safeguard for future -// refactoring, this function panics if the Provider is not a legacy provider. -func (pt Provider) LegacyString() string { - if pt.Namespace != "-" { - panic("not a legacy Provider") - } - return pt.Type -} - -// ParseProviderSourceString parses the source attribute and returns a provider. 
-// This is intended primarily to parse the FQN-like strings returned by -// terraform-config-inspect. -// -// The following are valid source string formats: -// name -// namespace/name -// hostname/namespace/name -func ParseProviderSourceString(str string) (Provider, tfdiags.Diagnostics) { - var ret Provider - var diags tfdiags.Diagnostics - - // split the source string into individual components - parts := strings.Split(str, "/") - if len(parts) == 0 || len(parts) > 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider source string", - Detail: `The "source" attribute must be in the format "[hostname/][namespace/]name"`, - }) - return ret, diags - } - - // check for an invalid empty string in any part - for i := range parts { - if parts[i] == "" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider source string", - Detail: `The "source" attribute must be in the format "[hostname/][namespace/]name"`, - }) - return ret, diags - } - } - - // check the 'name' portion, which is always the last part - givenName := parts[len(parts)-1] - name, err := ParseProviderPart(givenName) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider type", - Detail: fmt.Sprintf(`Invalid provider type %q in source %q: %s"`, givenName, str, err), - }) - return ret, diags - } - ret.Type = name - ret.Hostname = DefaultRegistryHost - - if len(parts) == 1 { - return NewDefaultProvider(parts[0]), diags - } - - if len(parts) >= 2 { - // the namespace is always the second-to-last part - givenNamespace := parts[len(parts)-2] - if givenNamespace == LegacyProviderNamespace { - // For now we're tolerating legacy provider addresses until we've - // finished updating the rest of the codebase to no longer use them, - // or else we'd get errors round-tripping through legacy subsystems. - ret.Namespace = LegacyProviderNamespace - } else { - namespace, err := ParseProviderPart(givenNamespace) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider namespace", - Detail: fmt.Sprintf(`Invalid provider namespace %q in source %q: %s"`, namespace, str, err), - }) - return Provider{}, diags - } - ret.Namespace = namespace - } - } - - // Final Case: 3 parts - if len(parts) == 3 { - // the namespace is always the first part in a three-part source string - hn, err := svchost.ForComparison(parts[0]) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider source hostname", - Detail: fmt.Sprintf(`Invalid provider source hostname namespace %q in source %q: %s"`, hn, str, err), - }) - return Provider{}, diags - } - ret.Hostname = hn - } - - if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost { - // Legacy provider addresses must always be on the default registry - // host, because the default registry host decides what actual FQN - // each one maps to. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider namespace", - Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".", - }) - return Provider{}, diags - } - - return ret, diags -} - -// ParseProviderPart processes an addrs.Provider namespace or type string -// provided by an end-user, producing a normalized version if possible or -// an error if the string contains invalid characters. 
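The three accepted source-string formats in practice; a sketch using this repository's provider name and HashiCorp's registry as examples:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	for _, s := range []string{
		"aws",                                 // name only: hashicorp namespace, default registry
		"Cox-Automotive/alks",                 // namespace/name: default registry host
		"registry.terraform.io/hashicorp/aws", // hostname/namespace/name
	} {
		p, diags := addrs.ParseProviderSourceString(s)
		if diags.HasErrors() {
			continue
		}
		fmt.Println(p) // e.g. registry.terraform.io/cox-automotive/alks
	}
}
```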
-// -// A provider part is processed in the same way as an individual label in a DNS -// domain name: it is transformed to lowercase per the usual DNS case mapping -// and normalization rules and may contain only letters, digits, and dashes. -// Additionally, dashes may not appear at the start or end of the string. -// -// These restrictions are intended to allow these names to appear in fussy -// contexts such as directory/file names on case-insensitive filesystems, -// repository names on GitHub, etc. We're using the DNS rules in particular, -// rather than some similar rules defined locally, because the hostname part -// of an addrs.Provider is already a hostname and it's ideal to use exactly -// the same case folding and normalization rules for all of the parts. -// -// In practice a provider type string conventionally does not contain dashes -// either. Such names are permitted, but providers with such type names will be -// hard to use because their resource type names will not be able to contain -// the provider type name and thus each resource will need an explicit provider -// address specified. (A real-world example of such a provider is the -// "google-beta" variant of the GCP provider, which has resource types that -// start with the "google_" prefix instead.) -// -// It's valid to pass the result of this function as the argument to a -// subsequent call, in which case the result will be identical. -func ParseProviderPart(given string) (string, error) { - if len(given) == 0 { - return "", fmt.Errorf("must have at least one character") - } - - // We're going to process the given name using the same "IDNA" library we - // use for the hostname portion, since it already implements the case - // folding rules we want. - // - // The idna library doesn't expose individual label parsing directly, but - // once we've verified it doesn't contain any dots we can just treat it - // like a top-level domain for this library's purposes. - if strings.ContainsRune(given, '.') { - return "", fmt.Errorf("dots are not allowed") - } - - // We don't allow names containing multiple consecutive dashes, just as - // a matter of preference: they look weird, confusing, or incorrect. - // This also, as a side-effect, prevents the use of the "punycode" - // indicator prefix "xn--" that would cause the IDNA library to interpret - // the given name as punycode, because that would be weird and unexpected. - if strings.Contains(given, "--") { - return "", fmt.Errorf("cannot use multiple consecutive dashes") - } - - result, err := idna.Lookup.ToUnicode(given) - if err != nil { - return "", fmt.Errorf("must contain only letters, digits, and dashes, and may not use leading or trailing dashes") - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go deleted file mode 100644 index 965cd7d1..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go +++ /dev/null @@ -1,219 +0,0 @@ -package addrs - -import ( - "fmt" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ProviderConfig is the address of a provider configuration. -type ProviderConfig struct { - Type Provider - - // If not empty, Alias identifies which non-default (aliased) provider - // configuration this address refers to. 
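A sketch of the normalization rules described in the comment above; the expectation that mixed-case input is lowered rather than rejected is an assumption drawn from that comment ("transformed to lowercase per the usual DNS case mapping").

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	fmt.Println(addrs.ParseProviderPart("ALKS"))        // alks <nil>: case-folded like a DNS label (assumed)
	fmt.Println(addrs.ParseProviderPart("google-beta")) // google-beta <nil>: single dashes are allowed

	_, err := addrs.ParseProviderPart("not--ok")
	fmt.Println(err != nil) // true: consecutive dashes are rejected
}
```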
- Alias string -} - -// NewDefaultProviderConfig returns the address of the default (un-aliased) -// configuration for the provider with the given type name. -func NewDefaultProviderConfig(typeName string) ProviderConfig { - return ProviderConfig{ - Type: NewLegacyProvider(typeName), - } -} - -// Absolute returns an AbsProviderConfig from the receiver and the given module -// instance address. -func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig { - return AbsProviderConfig{ - Module: module, - ProviderConfig: pc, - } -} - -func (pc ProviderConfig) String() string { - if pc.Type.LegacyString() == "" { - // Should never happen; always indicates a bug - return "provider." - } - - if pc.Alias != "" { - return fmt.Sprintf("provider.%s.%s", pc.Type.LegacyString(), pc.Alias) - } - - return "provider." + pc.Type.LegacyString() -} - -// StringCompact is an alternative to String that returns the form that can -// be parsed by ParseProviderConfigCompact, without the "provider." prefix. -func (pc ProviderConfig) StringCompact() string { - if pc.Alias != "" { - return fmt.Sprintf("%s.%s", pc.Type.LegacyString(), pc.Alias) - } - return pc.Type.LegacyString() -} - -// AbsProviderConfig is the absolute address of a provider configuration -// within a particular module instance. -type AbsProviderConfig struct { - Module ModuleInstance - ProviderConfig ProviderConfig -} - -// ParseAbsProviderConfig parses the given traversal as an absolute provider -// address. The following are examples of traversals that can be successfully -// parsed as absolute provider configuration addresses: -// -// provider.aws -// provider.aws.foo -// module.bar.provider.aws -// module.bar.module.baz.provider.aws.foo -// module.foo[1].provider.aws.foo -// -// This type of address is used, for example, to record the relationships -// between resources and provider configurations in the state structure. -// This type of address is not generally used in the UI, except in error -// messages that refer to provider configurations. 
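One of the example address forms from the doc comment above, parsed with the ParseAbsProviderConfigStr helper defined a little further down; the module and alias names are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	pc, diags := addrs.ParseAbsProviderConfigStr("module.bar.provider.aws.foo")
	if diags.HasErrors() {
		return
	}

	fmt.Println(pc.Module)                         // module.bar
	fmt.Println(pc.ProviderConfig.StringCompact()) // aws.foo
	fmt.Println(pc)                                // module.bar.provider.aws.foo
}
```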
-func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { - modInst, remain, diags := parseModuleInstancePrefix(traversal) - ret := AbsProviderConfig{ - Module: modInst, - } - if len(remain) < 2 || remain.RootName() != "provider" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", - Subject: remain.SourceRange().Ptr(), - }) - return ret, diags - } - if len(remain) > 3 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous operators after provider configuration alias.", - Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), - }) - return ret, diags - } - - if tt, ok := remain[1].(hcl.TraverseAttr); ok { - ret.ProviderConfig.Type = NewLegacyProvider(tt.Name) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The prefix \"provider.\" must be followed by a provider type name.", - Subject: remain[1].SourceRange().Ptr(), - }) - return ret, diags - } - - if len(remain) == 3 { - if tt, ok := remain[2].(hcl.TraverseAttr); ok { - ret.ProviderConfig.Alias = tt.Name - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Provider type name must be followed by a configuration alias name.", - Subject: remain[2].SourceRange().Ptr(), - }) - return ret, diags - } - } - - return ret, diags -} - -// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseAbsProviderConfig. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// the returned address is invalid. -func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return AbsProviderConfig{}, diags - } - - addr, addrDiags := ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -// ProviderConfigDefault returns the address of the default provider config -// of the given type inside the recieving module instance. -func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig { - return AbsProviderConfig{ - Module: m, - ProviderConfig: ProviderConfig{ - Type: NewLegacyProvider(name), - }, - } -} - -// ProviderConfigAliased returns the address of an aliased provider config -// of with given type and alias inside the recieving module instance. 
-func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig { - return AbsProviderConfig{ - Module: m, - ProviderConfig: ProviderConfig{ - Type: NewLegacyProvider(name), - Alias: alias, - }, - } -} - -// Inherited returns an address that the receiving configuration address might -// inherit from in a parent module. The second bool return value indicates if -// such inheritance is possible, and thus whether the returned address is valid. -// -// Inheritance is possible only for default (un-aliased) providers in modules -// other than the root module. Even if a valid address is returned, inheritence -// may not be performed for other reasons, such as if the calling module -// provided explicit provider configurations within the call for this module. -// The ProviderTransformer graph transform in the main terraform module has -// the authoritative logic for provider inheritance, and this method is here -// mainly just for its benefit. -func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) { - // Can't inherit if we're already in the root. - if len(pc.Module) == 0 { - return AbsProviderConfig{}, false - } - - // Can't inherit if we have an alias. - if pc.ProviderConfig.Alias != "" { - return AbsProviderConfig{}, false - } - - // Otherwise, we might inherit from a configuration with the same - // provider name in the parent module instance. - parentMod := pc.Module.Parent() - return pc.ProviderConfig.Absolute(parentMod), true -} - -func (pc AbsProviderConfig) String() string { - if len(pc.Module) == 0 { - return pc.ProviderConfig.String() - } - return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String()) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go deleted file mode 100644 index 211083a5..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go +++ /dev/null @@ -1,20 +0,0 @@ -package addrs - -// Referenceable is an interface implemented by all address types that can -// appear as references in configuration language expressions. -type Referenceable interface { - // All implementations of this interface must be covered by the type switch - // in lang.Scope.buildEvalContext. - referenceableSigil() - - // String produces a string representation of the address that could be - // parsed as a HCL traversal and passed to ParseRef to produce an identical - // result. - String() string -} - -type referenceable struct { -} - -func (r referenceable) referenceableSigil() { -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource.go b/vendor/github.com/hashicorp/terraform/addrs/resource.go deleted file mode 100644 index bb4831ea..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/resource.go +++ /dev/null @@ -1,271 +0,0 @@ -package addrs - -import ( - "fmt" - "strings" -) - -// Resource is an address for a resource block within configuration, which -// contains potentially-multiple resource instances if that configuration -// block uses "count" or "for_each". -type Resource struct { - referenceable - Mode ResourceMode - Type string - Name string -} - -func (r Resource) String() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - // Should never happen, but we'll return a string here rather than - // crashing just in case it does. 
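How the two resource modes render, plus the provider-config inheritance rule above in action; a sketch with hypothetical resource and module names:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	web := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "web"}
	ami := addrs.Resource{Mode: addrs.DataResourceMode, Type: "aws_ami", Name: "ubuntu"}
	fmt.Println(web) // aws_instance.web
	fmt.Println(ami) // data.aws_ami.ubuntu

	// A default (un-aliased) provider config in a child module may inherit from its parent.
	child := addrs.RootModuleInstance.Child("network", addrs.NoKey)
	parentPC, ok := child.ProviderConfigDefault("aws").Inherited()
	fmt.Println(parentPC, ok) // provider.aws true
}
```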
- return fmt.Sprintf(".%s.%s", r.Type, r.Name) - } -} - -func (r Resource) Equal(o Resource) bool { - return r.String() == o.String() -} - -// Instance produces the address for a specific instance of the receiver -// that is idenfied by the given key. -func (r Resource) Instance(key InstanceKey) ResourceInstance { - return ResourceInstance{ - Resource: r, - Key: key, - } -} - -// Absolute returns an AbsResource from the receiver and the given module -// instance address. -func (r Resource) Absolute(module ModuleInstance) AbsResource { - return AbsResource{ - Module: module, - Resource: r, - } -} - -// DefaultProviderConfig returns the address of the provider configuration -// that should be used for the resource identified by the reciever if it -// does not have a provider configuration address explicitly set in -// configuration. -// -// This method is not able to verify that such a configuration exists, nor -// represent the behavior of automatically inheriting certain provider -// configurations from parent modules. It just does a static analysis of the -// receiving address and returns an address to start from, relative to the -// same module that contains the resource. -func (r Resource) DefaultProviderConfig() ProviderConfig { - typeName := r.Type - if under := strings.Index(typeName, "_"); under != -1 { - typeName = typeName[:under] - } - - return ProviderConfig{ - Type: NewLegacyProvider(typeName), - } -} - -// ResourceInstance is an address for a specific instance of a resource. -// When a resource is defined in configuration with "count" or "for_each" it -// produces zero or more instances, which can be addressed using this type. -type ResourceInstance struct { - referenceable - Resource Resource - Key InstanceKey -} - -func (r ResourceInstance) ContainingResource() Resource { - return r.Resource -} - -func (r ResourceInstance) String() string { - if r.Key == NoKey { - return r.Resource.String() - } - return r.Resource.String() + r.Key.String() -} - -func (r ResourceInstance) Equal(o ResourceInstance) bool { - return r.String() == o.String() -} - -// Absolute returns an AbsResourceInstance from the receiver and the given module -// instance address. -func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { - return AbsResourceInstance{ - Module: module, - Resource: r, - } -} - -// AbsResource is an absolute address for a resource under a given module path. -type AbsResource struct { - targetable - Module ModuleInstance - Resource Resource -} - -// Resource returns the address of a particular resource within the receiver. -func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource { - return AbsResource{ - Module: m, - Resource: Resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - } -} - -// Instance produces the address for a specific instance of the receiver -// that is idenfied by the given key. -func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance { - return AbsResourceInstance{ - Module: r.Module, - Resource: r.Resource.Instance(key), - } -} - -// TargetContains implements Targetable by returning true if the given other -// address is either equal to the receiver or is an instance of the -// receiver. -func (r AbsResource) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case AbsResource: - // We'll use our stringification as a cheat-ish way to test for equality. 
- return to.String() == r.String() - - case AbsResourceInstance: - return r.TargetContains(to.ContainingResource()) - - default: - return false - - } -} - -func (r AbsResource) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -func (r AbsResource) Equal(o AbsResource) bool { - return r.String() == o.String() -} - -// AbsResourceInstance is an absolute address for a resource instance under a -// given module path. -type AbsResourceInstance struct { - targetable - Module ModuleInstance - Resource ResourceInstance -} - -// ResourceInstance returns the address of a particular resource instance within the receiver. -func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance { - return AbsResourceInstance{ - Module: m, - Resource: ResourceInstance{ - Resource: Resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - Key: key, - }, - } -} - -// ContainingResource returns the address of the resource that contains the -// receving resource instance. In other words, it discards the key portion -// of the address to produce an AbsResource value. -func (r AbsResourceInstance) ContainingResource() AbsResource { - return AbsResource{ - Module: r.Module, - Resource: r.Resource.ContainingResource(), - } -} - -// TargetContains implements Targetable by returning true if the given other -// address is equal to the receiver. -func (r AbsResourceInstance) TargetContains(other Targetable) bool { - switch to := other.(type) { - - case AbsResourceInstance: - // We'll use our stringification as a cheat-ish way to test for equality. - return to.String() == r.String() - - default: - return false - - } -} - -func (r AbsResourceInstance) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool { - return r.String() == o.String() -} - -// Less returns true if the receiver should sort before the given other value -// in a sorted list of addresses. -func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { - switch { - - case len(r.Module) != len(o.Module): - return len(r.Module) < len(o.Module) - - case r.Module.String() != o.Module.String(): - return r.Module.Less(o.Module) - - case r.Resource.Resource.Mode != o.Resource.Resource.Mode: - return r.Resource.Resource.Mode == DataResourceMode - - case r.Resource.Resource.Type != o.Resource.Resource.Type: - return r.Resource.Resource.Type < o.Resource.Resource.Type - - case r.Resource.Resource.Name != o.Resource.Resource.Name: - return r.Resource.Resource.Name < o.Resource.Resource.Name - - case r.Resource.Key != o.Resource.Key: - return InstanceKeyLess(r.Resource.Key, o.Resource.Key) - - default: - return false - - } -} - -// ResourceMode defines which lifecycle applies to a given resource. Each -// resource lifecycle has a slightly different address format. -type ResourceMode rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode - -const ( - // InvalidResourceMode is the zero value of ResourceMode and is not - // a valid resource mode. - InvalidResourceMode ResourceMode = 0 - - // ManagedResourceMode indicates a managed resource, as defined by - // "resource" blocks in configuration. 
- ManagedResourceMode ResourceMode = 'M' - - // DataResourceMode indicates a data resource, as defined by - // "data" blocks in configuration. - DataResourceMode ResourceMode = 'D' -) diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go deleted file mode 100644 index 9bdbdc42..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go +++ /dev/null @@ -1,105 +0,0 @@ -package addrs - -import "fmt" - -// ResourceInstancePhase is a special kind of reference used only internally -// during graph building to represent resource instances that are in a -// non-primary state. -// -// Graph nodes can declare themselves referenceable via an instance phase -// or can declare that they reference an instance phase in order to accomodate -// secondary graph nodes dealing with, for example, destroy actions. -// -// This special reference type cannot be accessed directly by end-users, and -// should never be shown in the UI. -type ResourceInstancePhase struct { - referenceable - ResourceInstance ResourceInstance - Phase ResourceInstancePhaseType -} - -var _ Referenceable = ResourceInstancePhase{} - -// Phase returns a special "phase address" for the receving instance. See the -// documentation of ResourceInstancePhase for the limited situations where this -// is intended to be used. -func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { - return ResourceInstancePhase{ - ResourceInstance: r, - Phase: rpt, - } -} - -// ContainingResource returns an address for the same phase of the resource -// that this instance belongs to. -func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { - return rp.ResourceInstance.Resource.Phase(rp.Phase) -} - -func (rp ResourceInstancePhase) String() string { - // We use a different separator here than usual to ensure that we'll - // never conflict with any non-phased resource instance string. This - // is intentionally something that would fail parsing with ParseRef, - // because this special address type should never be exposed in the UI. - return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) -} - -// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. -type ResourceInstancePhaseType string - -const ( - // ResourceInstancePhaseDestroy represents the "destroy" phase of a - // resource instance. - ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" - - // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy - // but is used for resources that have "create_before_destroy" set, thus - // requiring a different dependency ordering. - ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" -) - -func (rpt ResourceInstancePhaseType) String() string { - return string(rpt) -} - -// ResourcePhase is a special kind of reference used only internally -// during graph building to represent resources that are in a -// non-primary state. -// -// Graph nodes can declare themselves referenceable via a resource phase -// or can declare that they reference a resource phase in order to accomodate -// secondary graph nodes dealing with, for example, destroy actions. 
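Phase addresses are internal-only; this sketch just shows why the "#" separator guarantees they can never collide with, or be parsed as, a normal resource instance address (the resource name is hypothetical).

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	web := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "web"}

	phase := web.Instance(addrs.NoKey).Phase(addrs.ResourceInstancePhaseDestroy)
	fmt.Println(phase) // aws_instance.web#destroy, which ParseRef would never accept
}
```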
-// -// Since resources (as opposed to instances) aren't actually phased, this -// address type is used only as an approximation during initial construction -// of the resource-oriented plan graph, under the assumption that resource -// instances with ResourceInstancePhase addresses will be created in dynamic -// subgraphs during the graph walk. -// -// This special reference type cannot be accessed directly by end-users, and -// should never be shown in the UI. -type ResourcePhase struct { - referenceable - Resource Resource - Phase ResourceInstancePhaseType -} - -var _ Referenceable = ResourcePhase{} - -// Phase returns a special "phase address" for the receving instance. See the -// documentation of ResourceInstancePhase for the limited situations where this -// is intended to be used. -func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { - return ResourcePhase{ - Resource: r, - Phase: rpt, - } -} - -func (rp ResourcePhase) String() string { - // We use a different separator here than usual to ensure that we'll - // never conflict with any non-phased resource instance string. This - // is intentionally something that would fail parsing with ParseRef, - // because this special address type should never be exposed in the UI. - return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go deleted file mode 100644 index 0b5c33f8..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. - -package addrs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[InvalidResourceMode-0] - _ = x[ManagedResourceMode-77] - _ = x[DataResourceMode-68] -} - -const ( - _ResourceMode_name_0 = "InvalidResourceMode" - _ResourceMode_name_1 = "DataResourceMode" - _ResourceMode_name_2 = "ManagedResourceMode" -) - -func (i ResourceMode) String() string { - switch { - case i == 0: - return _ResourceMode_name_0 - case i == 68: - return _ResourceMode_name_1 - case i == 77: - return _ResourceMode_name_2 - default: - return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/self.go b/vendor/github.com/hashicorp/terraform/addrs/self.go deleted file mode 100644 index 7f24eaf0..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/self.go +++ /dev/null @@ -1,14 +0,0 @@ -package addrs - -// Self is the address of the special object "self" that behaves as an alias -// for a containing object currently in scope. -const Self selfT = 0 - -type selfT int - -func (s selfT) referenceableSigil() { -} - -func (s selfT) String() string { - return "self" -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/targetable.go b/vendor/github.com/hashicorp/terraform/addrs/targetable.go deleted file mode 100644 index 16819a5a..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/targetable.go +++ /dev/null @@ -1,26 +0,0 @@ -package addrs - -// Targetable is an interface implemented by all address types that can be -// used as "targets" for selecting sub-graphs of a graph. -type Targetable interface { - targetableSigil() - - // TargetContains returns true if the receiver is considered to contain - // the given other address. 
Containment, for the purpose of targeting, - // means that if a container address is targeted then all of the - // addresses within it are also implicitly targeted. - // - // A targetable address always contains at least itself. - TargetContains(other Targetable) bool - - // String produces a string representation of the address that could be - // parsed as a HCL traversal and passed to ParseTarget to produce an - // identical result. - String() string -} - -type targetable struct { -} - -func (r targetable) targetableSigil() { -} diff --git a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go deleted file mode 100644 index a880182a..00000000 --- a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go +++ /dev/null @@ -1,12 +0,0 @@ -package addrs - -// TerraformAttr is the address of an attribute of the "terraform" object in -// the interpolation scope, like "terraform.workspace". -type TerraformAttr struct { - referenceable - Name string -} - -func (ta TerraformAttr) String() string { - return "terraform." + ta.Name -} diff --git a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go deleted file mode 100644 index 6b823265..00000000 --- a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go +++ /dev/null @@ -1,340 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcled" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/hashicorp/terraform/tfdiags" - "github.com/mitchellh/colorstring" - wordwrap "github.com/mitchellh/go-wordwrap" - "github.com/zclconf/go-cty/cty" -) - -// Diagnostic formats a single diagnostic message. -// -// The width argument specifies at what column the diagnostic messages will -// be wrapped. If set to zero, messages will not be wrapped by this function -// at all. Although the long-form text parts of the message are wrapped, -// not all aspects of the message are guaranteed to fit within the specified -// terminal width. -func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { - if diag == nil { - // No good reason to pass a nil diagnostic in here... - return "" - } - - var buf bytes.Buffer - - switch diag.Severity() { - case tfdiags.Error: - buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) - case tfdiags.Warning: - buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) - default: - // Clear out any coloring that might be applied by Terraform's UI helper, - // so our result is not context-sensitive. - buf.WriteString(color.Color("\n[reset]")) - } - - desc := diag.Description() - sourceRefs := diag.Source() - - // We don't wrap the summary, since we expect it to be terse, and since - // this is where we put the text of a native Go error it may not always - // be pure text that lends itself well to word-wrapping. - fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) - - if sourceRefs.Subject != nil { - // We'll borrow HCL's range implementation here, because it has some - // handy features to help us produce a nice source code snippet. - highlightRange := sourceRefs.Subject.ToHCL() - snippetRange := highlightRange - if sourceRefs.Context != nil { - snippetRange = sourceRefs.Context.ToHCL() - } - - // Make sure the snippet includes the highlight. 
This should be true - // for any reasonable diagnostic, but we'll make sure. - snippetRange = hcl.RangeOver(snippetRange, highlightRange) - if snippetRange.Empty() { - snippetRange.End.Byte++ - snippetRange.End.Column++ - } - if highlightRange.Empty() { - highlightRange.End.Byte++ - highlightRange.End.Column++ - } - - var src []byte - if sources != nil { - src = sources[snippetRange.Filename] - } - if src == nil { - // This should generally not happen, as long as sources are always - // loaded through the main loader. We may load things in other - // ways in weird cases, so we'll tolerate it at the expense of - // a not-so-helpful error message. - fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) - } else { - file, offset := parseRange(src, highlightRange) - - headerRange := highlightRange - - contextStr := hcled.ContextString(file, offset-1) - if contextStr != "" { - contextStr = ", in " + contextStr - } - - fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) - - // Config snippet rendering - sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) - for sc.Scan() { - lineRange := sc.Range() - if !lineRange.Overlaps(snippetRange) { - continue - } - beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) - before := beforeRange.SliceBytes(src) - highlighted := highlightedRange.SliceBytes(src) - after := afterRange.SliceBytes(src) - fmt.Fprintf( - &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), - lineRange.Start.Line, - before, highlighted, after, - ) - } - - } - - if fromExpr := diag.FromExpr(); fromExpr != nil { - // We may also be able to generate information about the dynamic - // values of relevant variables at the point of evaluation, then. - // This is particularly useful for expressions that get evaluated - // multiple times with different values, such as blocks using - // "count" and "for_each", or within "for" expressions. - expr := fromExpr.Expression - ctx := fromExpr.EvalContext - vars := expr.Variables() - stmts := make([]string, 0, len(vars)) - seen := make(map[string]struct{}, len(vars)) - Traversals: - for _, traversal := range vars { - for len(traversal) > 1 { - val, diags := traversal.TraverseAbs(ctx) - if diags.HasErrors() { - // Skip anything that generates errors, since we probably - // already have the same error in our diagnostics set - // already. - traversal = traversal[:len(traversal)-1] - continue - } - - traversalStr := traversalStr(traversal) - if _, exists := seen[traversalStr]; exists { - continue Traversals // don't show duplicates when the same variable is referenced multiple times - } - switch { - case !val.IsKnown(): - // Can't say anything about this yet, then. - continue Traversals - case val.IsNull(): - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) - default: - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) - } - seen[traversalStr] = struct{}{} - } - } - - sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
- - if len(stmts) > 0 { - fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) - } - for _, stmt := range stmts { - fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) - } - } - - buf.WriteByte('\n') - } - - if desc.Detail != "" { - detail := desc.Detail - if width != 0 { - detail = wordwrap.WrapString(detail, uint(width)) - } - fmt.Fprintf(&buf, "%s\n", detail) - } - - return buf.String() -} - -// DiagnosticWarningsCompact is an alternative to Diagnostic for when all of -// the given diagnostics are warnings and we want to show them compactly, -// with only two lines per warning and excluding all of the detail information. -// -// The caller may optionally pre-process the given diagnostics with -// ConsolidateWarnings, in which case this function will recognize consolidated -// messages and include an indication that they are consolidated. -// -// Do not pass non-warning diagnostics to this function, or the result will -// be nonsense. -func DiagnosticWarningsCompact(diags tfdiags.Diagnostics, color *colorstring.Colorize) string { - var b strings.Builder - b.WriteString(color.Color("[bold][yellow]Warnings:[reset]\n\n")) - for _, diag := range diags { - sources := tfdiags.WarningGroupSourceRanges(diag) - b.WriteString(fmt.Sprintf("- %s\n", diag.Description().Summary)) - if len(sources) > 0 { - mainSource := sources[0] - if mainSource.Subject != nil { - if len(sources) > 1 { - b.WriteString(fmt.Sprintf( - " on %s line %d (and %d more)\n", - mainSource.Subject.Filename, - mainSource.Subject.Start.Line, - len(sources)-1, - )) - } else { - b.WriteString(fmt.Sprintf( - " on %s line %d\n", - mainSource.Subject.Filename, - mainSource.Subject.Start.Line, - )) - } - } else if len(sources) > 1 { - b.WriteString(fmt.Sprintf( - " (%d occurences of this warning)\n", - len(sources), - )) - } - } - } - - return b.String() -} - -func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { - filename := rng.Filename - offset := rng.Start.Byte - - // We need to re-parse here to get a *hcl.File we can interrogate. This - // is not awesome since we presumably already parsed the file earlier too, - // but this re-parsing is architecturally simpler than retaining all of - // the hcl.File objects and we only do this in the case of an error anyway - // so the overhead here is not a big problem. - parser := hclparse.NewParser() - var file *hcl.File - var diags hcl.Diagnostics - if strings.HasSuffix(filename, ".json") { - file, diags = parser.ParseJSON(src, filename) - } else { - file, diags = parser.ParseHCL(src, filename) - } - if diags.HasErrors() { - return file, offset - } - - return file, offset -} - -// traversalStr produces a representation of an HCL traversal that is compact, -// resembles HCL native syntax, and is suitable for display in the UI. -func traversalStr(traversal hcl.Traversal) string { - // This is a specialized subset of traversal rendering tailored to - // producing helpful contextual messages in diagnostics. It is not - // comprehensive nor intended to be used for other purposes. 
- - var buf bytes.Buffer - for _, step := range traversal { - switch tStep := step.(type) { - case hcl.TraverseRoot: - buf.WriteString(tStep.Name) - case hcl.TraverseAttr: - buf.WriteByte('.') - buf.WriteString(tStep.Name) - case hcl.TraverseIndex: - buf.WriteByte('[') - if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { - buf.WriteString(compactValueStr(tStep.Key)) - } else { - // We'll just use a placeholder for more complex values, - // since otherwise our result could grow ridiculously long. - buf.WriteString("...") - } - buf.WriteByte(']') - } - } - return buf.String() -} - -// compactValueStr produces a compact, single-line summary of a given value -// that is suitable for display in the UI. -// -// For primitives it returns a full representation, while for more complex -// types it instead summarizes the type, size, etc to produce something -// that is hopefully still somewhat useful but not as verbose as a rendering -// of the entire data structure. -func compactValueStr(val cty.Value) string { - // This is a specialized subset of value rendering tailored to producing - // helpful but concise messages in diagnostics. It is not comprehensive - // nor intended to be used for other purposes. - - ty := val.Type() - switch { - case val.IsNull(): - return "null" - case !val.IsKnown(): - // Should never happen here because we should filter before we get - // in here, but we'll do something reasonable rather than panic. - return "(not yet known)" - case ty == cty.Bool: - if val.True() { - return "true" - } - return "false" - case ty == cty.Number: - bf := val.AsBigFloat() - return bf.Text('g', 10) - case ty == cty.String: - // Go string syntax is not exactly the same as HCL native string syntax, - // but we'll accept the minor edge-cases where this is different here - // for now, just to get something reasonable here. - return fmt.Sprintf("%q", val.AsString()) - case ty.IsCollectionType() || ty.IsTupleType(): - l := val.LengthInt() - switch l { - case 0: - return "empty " + ty.FriendlyName() - case 1: - return ty.FriendlyName() + " with 1 element" - default: - return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) - } - case ty.IsObjectType(): - atys := ty.AttributeTypes() - l := len(atys) - switch l { - case 0: - return "object with no attributes" - case 1: - var name string - for k := range atys { - name = k - } - return fmt.Sprintf("object with 1 attribute %q", name) - default: - return fmt.Sprintf("object with %d attributes", l) - } - default: - return ty.FriendlyName() - } -} diff --git a/vendor/github.com/hashicorp/terraform/command/format/diff.go b/vendor/github.com/hashicorp/terraform/command/format/diff.go deleted file mode 100644 index 2f2258de..00000000 --- a/vendor/github.com/hashicorp/terraform/command/format/diff.go +++ /dev/null @@ -1,1216 +0,0 @@ -package format - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - - "github.com/mitchellh/colorstring" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/states" -) - -// ResourceChange returns a string representation of a change to a particular -// resource, for inclusion in user-facing plan output. 
-// -// The resource schema must be provided along with the change so that the -// formatted change can reflect the configuration structure for the associated -// resource. -// -// If "color" is non-nil, it will be used to color the result. Otherwise, -// no color codes will be included. -func ResourceChange( - change *plans.ResourceInstanceChangeSrc, - tainted bool, - schema *configschema.Block, - color *colorstring.Colorize, -) string { - addr := change.Addr - var buf bytes.Buffer - - if color == nil { - color = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, - Reset: false, - } - } - - dispAddr := addr.String() - if change.DeposedKey != states.NotDeposed { - dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) - } - - switch change.Action { - case plans.Create: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) - case plans.Read: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) - case plans.Update: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) - case plans.CreateThenDelete, plans.DeleteThenCreate: - if tainted { - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) - } else { - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) - } - case plans.Delete: - buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) - default: - // should never happen, since the above is exhaustive - buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) - } - buf.WriteString(color.Color("[reset]\n")) - - buf.WriteString(color.Color(DiffActionSymbol(change.Action)) + " ") - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - buf.WriteString(fmt.Sprintf( - "resource %q %q", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - case addrs.DataResourceMode: - buf.WriteString(fmt.Sprintf( - "data %q %q ", - addr.Resource.Resource.Type, - addr.Resource.Resource.Name, - )) - default: - // should never happen, since the above is exhaustive - buf.WriteString(addr.String()) - } - - buf.WriteString(" {") - - p := blockBodyDiffPrinter{ - buf: &buf, - color: color, - action: change.Action, - requiredReplace: change.RequiredReplace, - } - - // Most commonly-used resources have nested blocks that result in us - // going at least three traversals deep while we recurse here, so we'll - // start with that much capacity and then grow as needed for deeper - // structures. - path := make(cty.Path, 0, 3) - - changeV, err := change.Decode(schema.ImpliedType()) - if err != nil { - // Should never happen in here, since we've already been through - // loads of layers of encode/decode of the planned changes before now. - panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) - } - - // We currently have an opt-out that permits the legacy SDK to return values - // that defy our usual conventions around handling of nesting blocks. To - // avoid the rendering code from needing to handle all of these, we'll - // normalize first. - // (Ideally we'd do this as part of the SDK opt-out implementation in core, - // but we've added it here for now to reduce risk of unexpected impacts - // on other code in core.) 
- changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema) - changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema) - - bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path) - if bodyWritten { - buf.WriteString("\n") - buf.WriteString(strings.Repeat(" ", 4)) - } - buf.WriteString("}\n") - - return buf.String() -} - -type blockBodyDiffPrinter struct { - buf *bytes.Buffer - color *colorstring.Colorize - action plans.Action - requiredReplace cty.PathSet -} - -const forcesNewResourceCaption = " [red]# forces replacement[reset]" - -// writeBlockBodyDiff writes attribute or block differences -// and returns true if any differences were found and written -func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool { - path = ctyEnsurePathCapacity(path, 1) - - bodyWritten := false - blankBeforeBlocks := false - { - attrNames := make([]string, 0, len(schema.Attributes)) - attrNameLen := 0 - for name := range schema.Attributes { - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - if oldVal.IsNull() && newVal.IsNull() { - // Skip attributes where both old and new values are null - // (we do this early here so that we'll do our value alignment - // based on the longest attribute name that has a change, rather - // than the longest attribute name in the full set.) - continue - } - - attrNames = append(attrNames, name) - if len(name) > attrNameLen { - attrNameLen = len(name) - } - } - sort.Strings(attrNames) - if len(attrNames) > 0 { - blankBeforeBlocks = true - } - - for _, name := range attrNames { - attrS := schema.Attributes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - bodyWritten = true - p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path) - } - } - - { - blockTypeNames := make([]string, 0, len(schema.BlockTypes)) - for name := range schema.BlockTypes { - blockTypeNames = append(blockTypeNames, name) - } - sort.Strings(blockTypeNames) - - for _, name := range blockTypeNames { - blockS := schema.BlockTypes[name] - oldVal := ctyGetAttrMaybeNull(old, name) - newVal := ctyGetAttrMaybeNull(new, name) - - bodyWritten = true - p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) - - // Always include a blank for any subsequent block types. 
- blankBeforeBlocks = true - } - } - - return bodyWritten -} - -func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) { - path = append(path, cty.GetAttrStep{Name: name}) - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - showJustNew := false - var action plans.Action - switch { - case old.IsNull(): - action = plans.Create - showJustNew = true - case new.IsNull(): - action = plans.Delete - case ctyEqualWithUnknown(old, new): - action = plans.NoOp - showJustNew = true - default: - action = plans.Update - } - - p.writeActionSymbol(action) - - p.buf.WriteString(p.color.Color("[bold]")) - p.buf.WriteString(name) - p.buf.WriteString(p.color.Color("[reset]")) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) - p.buf.WriteString(" = ") - - if attrS.Sensitive { - p.buf.WriteString("(sensitive value)") - } else { - switch { - case showJustNew: - p.writeValue(new, action, indent+2) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - default: - // We show new even if it is null to emphasize the fact - // that it is being unset, since otherwise it is easy to - // misunderstand that the value is still set to the old value. - p.writeValueDiff(old, new, indent+2, path) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) { - path = append(path, cty.GetAttrStep{Name: name}) - if old.IsNull() && new.IsNull() { - // Nothing to do if both old and new is null - return - } - - // Where old/new are collections representing a nesting mode other than - // NestingSingle, we assume the collection value can never be unknown - // since we always produce the container for the nested objects, even if - // the objects within are computed. - - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - var action plans.Action - eqV := new.Equals(old) - switch { - case old.IsNull(): - action = plans.Create - case new.IsNull(): - action = plans.Delete - case !new.IsWhollyKnown() || !old.IsWhollyKnown(): - // "old" should actually always be known due to our contract - // that old values must never be unknown, but we'll allow it - // anyway to be robust. - action = plans.Update - case !eqV.IsKnown() || !eqV.True(): - action = plans.Update - } - - if blankBefore { - p.buf.WriteRune('\n') - } - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path) - case configschema.NestingList: - // For the sake of handling nested blocks, we'll treat a null list - // the same as an empty list since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockListAsEmpty(old) - new = ctyNullBlockListAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - // Here we intentionally preserve the index-based correspondance - // between old and new, rather than trying to detect insertions - // and removals in the list, because this more accurately reflects - // how Terraform Core and providers will understand the change, - // particularly when the nested block contains computed attributes - // that will themselves maintain correspondance by index. - - // commonLen is number of elements that exist in both lists, which - // will be presented as updates (~). 
Any additional items in one - // of the lists will be presented as either creates (+) or deletes (-) - // depending on which list they belong to. - var commonLen int - switch { - case len(oldItems) < len(newItems): - commonLen = len(oldItems) - default: - commonLen = len(newItems) - } - - if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { - p.buf.WriteRune('\n') - } - - for i := 0; i < commonLen; i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := newItems[i] - action := plans.Update - if oldItem.RawEquals(newItem) { - action = plans.NoOp - } - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) - } - for i := commonLen; i < len(oldItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - oldItem := oldItems[i] - newItem := cty.NullVal(oldItem.Type()) - p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) - } - for i := commonLen; i < len(newItems); i++ { - path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) - newItem := newItems[i] - oldItem := cty.NullVal(newItem.Type()) - p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) - } - case configschema.NestingSet: - // For the sake of handling nested blocks, we'll treat a null set - // the same as an empty set since the config language doesn't - // distinguish these anyway. - old = ctyNullBlockSetAsEmpty(old) - new = ctyNullBlockSetAsEmpty(new) - - oldItems := ctyCollectionValues(old) - newItems := ctyCollectionValues(new) - - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both sets are empty - return - } - - allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) - allItems = append(allItems, oldItems...) - allItems = append(allItems, newItems...) - all := cty.SetVal(allItems) - - if blankBefore { - p.buf.WriteRune('\n') - } - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - var action plans.Action - var oldValue, newValue cty.Value - switch { - case !val.IsKnown(): - action = plans.Update - newValue = val - case !old.HasElement(val).True(): - action = plans.Create - oldValue = cty.NullVal(val.Type()) - newValue = val - case !new.HasElement(val).True(): - action = plans.Delete - oldValue = val - newValue = cty.NullVal(val.Type()) - default: - action = plans.NoOp - oldValue = val - newValue = val - } - path := append(path, cty.IndexStep{Key: val}) - p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) - } - - case configschema.NestingMap: - // For the sake of handling nested blocks, we'll treat a null map - // the same as an empty map since the config language doesn't - // distinguish these anyway. 
- old = ctyNullBlockMapAsEmpty(old) - new = ctyNullBlockMapAsEmpty(new) - - oldItems := old.AsValueMap() - newItems := new.AsValueMap() - if (len(oldItems) + len(newItems)) == 0 { - // Nothing to do if both maps are empty - return - } - - allKeys := make(map[string]bool) - for k := range oldItems { - allKeys[k] = true - } - for k := range newItems { - allKeys[k] = true - } - allKeysOrder := make([]string, 0, len(allKeys)) - for k := range allKeys { - allKeysOrder = append(allKeysOrder, k) - } - sort.Strings(allKeysOrder) - - if blankBefore { - p.buf.WriteRune('\n') - } - - for _, k := range allKeysOrder { - var action plans.Action - oldValue := oldItems[k] - newValue := newItems[k] - switch { - case oldValue == cty.NilVal: - oldValue = cty.NullVal(newValue.Type()) - action = plans.Create - case newValue == cty.NilVal: - newValue = cty.NullVal(oldValue.Type()) - action = plans.Delete - case !newValue.RawEquals(oldValue): - action = plans.Update - default: - action = plans.NoOp - } - - path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) - p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) - } - } -} - -func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - p.writeActionSymbol(action) - - if label != nil { - fmt.Fprintf(p.buf, "%s %q {", name, *label) - } else { - fmt.Fprintf(p.buf, "%s {", name) - } - - if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - - bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) - if bodyWritten { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - } - p.buf.WriteString("}") -} - -func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { - if !val.IsKnown() { - p.buf.WriteString("(known after apply)") - return - } - if val.IsNull() { - p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) - return - } - - ty := val.Type() - - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - { - // Special behavior for JSON strings containing array or object - src := []byte(val.AsString()) - ty, err := ctyjson.ImpliedType(src) - // check for the special case of "null", which decodes to nil, - // and just allow it to be printed out directly - if err == nil && !ty.IsPrimitiveType() && strings.TrimSpace(val.AsString()) != "null" { - jv, err := ctyjson.Unmarshal(src, ty) - if err == nil { - p.buf.WriteString("jsonencode(") - if jv.LengthInt() == 0 { - p.writeValue(jv, action, 0) - } else { - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(jv, action, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteByte(')') - break // don't *also* do the normal behavior below - } - } - } - - if strings.Contains(val.AsString(), "\n") { - // It's a multi-line string, so we want to use the multi-line - // rendering so it'll be readable. Rather than re-implement - // that here, we'll just re-use the multi-line string diff - // printer with no changes, which ends up producing the - // result we want here. 
- // The path argument is nil because we don't track path - // information into strings and we know that a string can't - // have any indices or attributes that might need to be marked - // as (requires replacement), which is what that argument is for. - p.writeValueDiff(val, val, indent, nil) - break - } - - fmt.Fprintf(p.buf, "%q", val.AsString()) - case cty.Bool: - if val.True() { - p.buf.WriteString("true") - } else { - p.buf.WriteString("false") - } - case cty.Number: - bf := val.AsBigFloat() - p.buf.WriteString(bf.Text('f', -1)) - default: - // should never happen, since the above is exhaustive - fmt.Fprintf(p.buf, "%#v", val) - } - case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): - p.buf.WriteString("[") - - it := val.ElementIterator() - for it.Next() { - _, val := it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",") - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("]") - case ty.IsMapType(): - p.buf.WriteString("{") - - keyLen := 0 - for it := val.ElementIterator(); it.Next(); { - key, _ := it.Element() - if keyStr := key.AsString(); len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - for it := val.ElementIterator(); it.Next(); { - key, val := it.Element() - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.writeValue(key, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if val.LengthInt() > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - case ty.IsObjectType(): - p.buf.WriteString("{") - - atys := ty.AttributeTypes() - attrNames := make([]string, 0, len(atys)) - nameLen := 0 - for attrName := range atys { - attrNames = append(attrNames, attrName) - if len(attrName) > nameLen { - nameLen = len(attrName) - } - } - sort.Strings(attrNames) - - for _, attrName := range attrNames { - val := val.GetAttr(attrName) - - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(action) - p.buf.WriteString(attrName) - p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) - p.buf.WriteString(" = ") - p.writeValue(val, action, indent+4) - } - - if len(attrNames) > 0 { - p.buf.WriteString("\n") - p.buf.WriteString(strings.Repeat(" ", indent)) - } - p.buf.WriteString("}") - } -} - -func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { - ty := old.Type() - typesEqual := ctyTypesEqual(ty, new.Type()) - - // We have some specialized diff implementations for certain complex - // values where it's useful to see a visualization of the diff of - // the nested elements rather than just showing the entire old and - // new values verbatim. - // However, these specialized implementations can apply only if both - // values are known and non-null. - if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { - switch { - case ty == cty.String: - // We have special behavior for both multi-line strings in general - // and for strings that can parse as JSON. For the JSON handling - // to apply, both old and new must be valid JSON. 
- // For single-line strings that don't parse as JSON we just fall - // out of this switch block and do the default old -> new rendering. - oldS := old.AsString() - newS := new.AsString() - - { - // Special behavior for JSON strings containing object or - // list values. - oldBytes := []byte(oldS) - newBytes := []byte(newS) - oldType, oldErr := ctyjson.ImpliedType(oldBytes) - newType, newErr := ctyjson.ImpliedType(newBytes) - if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { - oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) - newJV, newErr := ctyjson.Unmarshal(newBytes, newType) - if oldErr == nil && newErr == nil { - if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace - p.buf.WriteString("jsonencode(") - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(plans.Update) - p.writeValueDiff(oldJV, newJV, indent+4, path) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } else { - // if they differ only in insigificant whitespace - // then we'll note that but still expand out the - // effective value. - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) - } else { - p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) - } - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent+4)) - p.writeValue(oldJV, plans.NoOp, indent+4) - p.buf.WriteByte('\n') - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteByte(')') - } - return - } - } - } - - if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 { - break - } - - p.buf.WriteString("<<~EOT") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var oldLines, newLines []cty.Value - { - r := strings.NewReader(oldS) - sc := bufio.NewScanner(r) - for sc.Scan() { - oldLines = append(oldLines, cty.StringVal(sc.Text())) - } - } - { - r := strings.NewReader(newS) - sc := bufio.NewScanner(r) - for sc.Scan() { - newLines = append(newLines, cty.StringVal(sc.Text())) - } - } - - diffLines := ctySequenceDiff(oldLines, newLines) - for _, diffLine := range diffLines { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(diffLine.Action) - - switch diffLine.Action { - case plans.NoOp, plans.Delete: - p.buf.WriteString(diffLine.Before.AsString()) - case plans.Create: - p.buf.WriteString(diffLine.After.AsString()) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return for strings - p.buf.WriteString(diffLine.After.AsString()) - - } - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol - p.buf.WriteString("EOT") - - return - - case ty.IsSetType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var addedVals, removedVals, allVals []cty.Value - for it := old.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if new.HasElement(val).False() { - removedVals = append(removedVals, val) - } - } - for it := new.ElementIterator(); it.Next(); { - _, val := it.Element() - allVals = append(allVals, val) - if val.IsKnown() && old.HasElement(val).False() { - addedVals = append(addedVals, val) - } 
- } - - var all, added, removed cty.Value - if len(allVals) > 0 { - all = cty.SetVal(allVals) - } else { - all = cty.SetValEmpty(ty.ElementType()) - } - if len(addedVals) > 0 { - added = cty.SetVal(addedVals) - } else { - added = cty.SetValEmpty(ty.ElementType()) - } - if len(removedVals) > 0 { - removed = cty.SetVal(removedVals) - } else { - removed = cty.SetValEmpty(ty.ElementType()) - } - - for it := all.ElementIterator(); it.Next(); { - _, val := it.Element() - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - - var action plans.Action - switch { - case !val.IsKnown(): - action = plans.Update - case added.HasElement(val).True(): - action = plans.Create - case removed.HasElement(val).True(): - action = plans.Delete - default: - action = plans.NoOp - } - - p.writeActionSymbol(action) - p.writeValue(val, action, indent+4) - p.buf.WriteString(",\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - case ty.IsListType() || ty.IsTupleType(): - p.buf.WriteString("[") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) - for _, elemDiff := range elemDiffs { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(elemDiff.Action) - switch elemDiff.Action { - case plans.NoOp, plans.Delete: - p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) - case plans.Update: - p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) - case plans.Create: - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return. - p.writeValue(elemDiff.After, elemDiff.Action, indent+4) - } - - p.buf.WriteString(",\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("]") - return - - case ty.IsMapType(): - p.buf.WriteString("{") - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - p.buf.WriteString("\n") - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - kV := cty.StringVal(k) - var action plans.Action - if old.HasIndex(kV).False() { - action = plans.Create - } else if new.HasIndex(kV).False() { - action = plans.Delete - } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { - action = plans.NoOp - } else { - action = plans.Update - } - - path := append(path, cty.IndexStep{Key: kV}) - - p.writeActionSymbol(action) - p.writeValue(kV, action, indent+4) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - switch action { - case plans.Create, plans.NoOp: - v := new.Index(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.Index(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.Index(kV) - newV := new.Index(kV) 
- p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteByte('\n') - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - return - case ty.IsObjectType(): - p.buf.WriteString("{") - p.buf.WriteString("\n") - - forcesNewResource := p.pathForcesNewResource(path) - - var allKeys []string - keyLen := 0 - for it := old.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - for it := new.ElementIterator(); it.Next(); { - k, _ := it.Element() - keyStr := k.AsString() - allKeys = append(allKeys, keyStr) - if len(keyStr) > keyLen { - keyLen = len(keyStr) - } - } - - sort.Strings(allKeys) - - lastK := "" - for i, k := range allKeys { - if i > 0 && lastK == k { - continue // skip duplicates (list is sorted) - } - lastK = k - - p.buf.WriteString(strings.Repeat(" ", indent+2)) - kV := k - var action plans.Action - if !old.Type().HasAttribute(kV) { - action = plans.Create - } else if !new.Type().HasAttribute(kV) { - action = plans.Delete - } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() { - action = plans.NoOp - } else { - action = plans.Update - } - - path := append(path, cty.GetAttrStep{Name: kV}) - - p.writeActionSymbol(action) - p.buf.WriteString(k) - p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) - p.buf.WriteString(" = ") - - switch action { - case plans.Create, plans.NoOp: - v := new.GetAttr(kV) - p.writeValue(v, action, indent+4) - case plans.Delete: - oldV := old.GetAttr(kV) - newV := cty.NullVal(oldV.Type()) - p.writeValueDiff(oldV, newV, indent+4, path) - default: - oldV := old.GetAttr(kV) - newV := new.GetAttr(kV) - p.writeValueDiff(oldV, newV, indent+4, path) - } - - p.buf.WriteString("\n") - } - - p.buf.WriteString(strings.Repeat(" ", indent)) - p.buf.WriteString("}") - - if forcesNewResource { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } - return - } - } - - // In all other cases, we just show the new and old values as-is - p.writeValue(old, plans.Delete, indent) - if new.IsNull() { - p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) - } else { - p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) - } - - p.writeValue(new, plans.Create, indent) - if p.pathForcesNewResource(path) { - p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) - } -} - -// writeActionSymbol writes a symbol to represent the given action, followed -// by a space. -// -// It only supports the actions that can be represented with a single character: -// Create, Delete, Update and NoAction. -func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { - switch action { - case plans.Create: - p.buf.WriteString(p.color.Color("[green]+[reset] ")) - case plans.Delete: - p.buf.WriteString(p.color.Color("[red]-[reset] ")) - case plans.Update: - p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) - case plans.NoOp: - p.buf.WriteString(" ") - default: - // Should never happen - p.buf.WriteString(p.color.Color("? 
")) - } -} - -func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { - if !p.action.IsReplace() || p.requiredReplace.Empty() { - // "requiredReplace" only applies when the instance is being replaced, - // and we should only inspect that set if it is not empty - return false - } - return p.requiredReplace.Has(path) -} - -func ctyEmptyString(value cty.Value) bool { - if !value.IsNull() && value.IsKnown() { - valueType := value.Type() - if valueType == cty.String && value.AsString() == "" { - return true - } - } - return false -} - -func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { - attrType := val.Type().AttributeType(name) - - if val.IsNull() { - return cty.NullVal(attrType) - } - - // We treat "" as null here - // as existing SDK doesn't support null yet. - // This allows us to avoid spurious diffs - // until we introduce null to the SDK. - attrValue := val.GetAttr(name) - if ctyEmptyString(attrValue) { - return cty.NullVal(attrType) - } - - return attrValue -} - -func ctyCollectionValues(val cty.Value) []cty.Value { - if !val.IsKnown() || val.IsNull() { - return nil - } - - ret := make([]cty.Value, 0, val.LengthInt()) - for it := val.ElementIterator(); it.Next(); { - _, value := it.Element() - ret = append(ret, value) - } - return ret -} - -// ctySequenceDiff returns differences between given sequences of cty.Value(s) -// in the form of Create, Delete, or Update actions (for objects). -func ctySequenceDiff(old, new []cty.Value) []*plans.Change { - var ret []*plans.Change - lcs := objchange.LongestCommonSubsequence(old, new) - var oldI, newI, lcsI int - for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { - for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { - isObjectDiff := old[oldI].Type().IsObjectType() && newI < len(new) && new[newI].Type().IsObjectType() && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) - if isObjectDiff { - ret = append(ret, &plans.Change{ - Action: plans.Update, - Before: old[oldI], - After: new[newI], - }) - oldI++ - newI++ // we also consume the next "new" in this case - continue - } - - ret = append(ret, &plans.Change{ - Action: plans.Delete, - Before: old[oldI], - After: cty.NullVal(old[oldI].Type()), - }) - oldI++ - } - for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { - ret = append(ret, &plans.Change{ - Action: plans.Create, - Before: cty.NullVal(new[newI].Type()), - After: new[newI], - }) - newI++ - } - if lcsI < len(lcs) { - ret = append(ret, &plans.Change{ - Action: plans.NoOp, - Before: lcs[lcsI], - After: lcs[lcsI], - }) - - // All of our indexes advance together now, since the line - // is common to all three sequences. 
- lcsI++ - oldI++ - newI++ - } - } - return ret -} - -func ctyEqualWithUnknown(old, new cty.Value) bool { - if !old.IsWhollyKnown() || !new.IsWhollyKnown() { - return false - } - return old.Equals(new).True() -} - -// ctyTypesEqual checks equality of two types more loosely -// by avoiding checks of object/tuple elements -// as we render differences on element-by-element basis anyway -func ctyTypesEqual(oldT, newT cty.Type) bool { - if oldT.IsObjectType() && newT.IsObjectType() { - return true - } - if oldT.IsTupleType() && newT.IsTupleType() { - return true - } - return oldT.Equals(newT) -} - -func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { - if cap(path)-len(path) >= minExtra { - return path - } - newCap := cap(path) * 2 - if newCap < (len(path) + minExtra) { - newCap = len(path) + minExtra - } - newPath := make(cty.Path, len(path), newCap) - copy(newPath, path) - return newPath -} - -// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -// -// In particular, this function handles the special situation where a "list" is -// actually represented as a tuple type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsListType() { - return cty.ListValEmpty(ty.ElementType()) - } - return cty.EmptyTupleVal // must need a tuple, then -} - -// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -// -// In particular, this function handles the special situation where a "map" is -// actually represented as an object type where nested blocks contain -// dynamically-typed values. -func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - if ty := in.Type(); ty.IsMapType() { - return cty.MapValEmpty(ty.ElementType()) - } - return cty.EmptyObjectVal // must need an object, then -} - -// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil -// or returns an empty value of a suitable type to serve as a placeholder for it. -func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { - if !in.IsNull() { - return in - } - // Dynamically-typed attributes are not supported inside blocks backed by - // sets, so our result here is always a set. - return cty.SetValEmpty(in.Type().ElementType()) -} - -// DiffActionSymbol returns a string that, once passed through a -// colorstring.Colorize, will produce a result that can be written -// to a terminal to produce a symbol made of three printable -// characters, possibly interspersed with VT100 color codes. -func DiffActionSymbol(action plans.Action) string { - switch action { - case plans.DeleteThenCreate: - return "[red]-[reset]/[green]+[reset]" - case plans.CreateThenDelete: - return "[green]+[reset]/[red]-[reset]" - case plans.Create: - return " [green]+[reset]" - case plans.Delete: - return " [red]-[reset]" - case plans.Read: - return " [cyan]<=[reset]" - case plans.Update: - return " [yellow]~[reset]" - default: - return " ?" 
- } -} diff --git a/vendor/github.com/hashicorp/terraform/command/format/format.go b/vendor/github.com/hashicorp/terraform/command/format/format.go deleted file mode 100644 index aa8d7deb..00000000 --- a/vendor/github.com/hashicorp/terraform/command/format/format.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package format contains helpers for formatting various Terraform -// structures for human-readabout output. -// -// This package is used by the official Terraform CLI in formatting any -// output and is exported to encourage non-official frontends to mimic the -// output formatting as much as possible so that text formats of Terraform -// structures have a consistent look and feel. -package format diff --git a/vendor/github.com/hashicorp/terraform/command/format/object_id.go b/vendor/github.com/hashicorp/terraform/command/format/object_id.go deleted file mode 100644 index 85ebbfec..00000000 --- a/vendor/github.com/hashicorp/terraform/command/format/object_id.go +++ /dev/null @@ -1,123 +0,0 @@ -package format - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ObjectValueID takes a value that is assumed to be an object representation -// of some resource instance object and attempts to heuristically find an -// attribute of it that is likely to be a unique identifier in the remote -// system that it belongs to which will be useful to the user. -// -// If such an attribute is found, its name and string value intended for -// display are returned. Both returned strings are empty if no such attribute -// exists, in which case the caller should assume that the resource instance -// address within the Terraform configuration is the best available identifier. -// -// This is only a best-effort sort of thing, relying on naming conventions in -// our resource type schemas. The result is not guaranteed to be unique, but -// should generally be suitable for display to an end-user anyway. -// -// This function will panic if the given value is not of an object type. -func ObjectValueID(obj cty.Value) (k, v string) { - if obj.IsNull() || !obj.IsKnown() { - return "", "" - } - - atys := obj.Type().AttributeTypes() - - switch { - - case atys["id"] == cty.String: - v := obj.GetAttr("id") - if v.IsKnown() && !v.IsNull() { - return "id", v.AsString() - } - - case atys["name"] == cty.String: - // "name" isn't always globally unique, but if there isn't also an - // "id" then it _often_ is, in practice. - v := obj.GetAttr("name") - if v.IsKnown() && !v.IsNull() { - return "name", v.AsString() - } - } - - return "", "" -} - -// ObjectValueName takes a value that is assumed to be an object representation -// of some resource instance object and attempts to heuristically find an -// attribute of it that is likely to be a human-friendly name in the remote -// system that it belongs to which will be useful to the user. -// -// If such an attribute is found, its name and string value intended for -// display are returned. Both returned strings are empty if no such attribute -// exists, in which case the caller should assume that the resource instance -// address within the Terraform configuration is the best available identifier. -// -// This is only a best-effort sort of thing, relying on naming conventions in -// our resource type schemas. The result is not guaranteed to be unique, but -// should generally be suitable for display to an end-user anyway. 
-// -// Callers that use both ObjectValueName and ObjectValueID at the same time -// should be prepared to get the same attribute key and value from both in -// some cases, since there is overlap betweek the id-extraction and -// name-extraction heuristics. -// -// This function will panic if the given value is not of an object type. -func ObjectValueName(obj cty.Value) (k, v string) { - if obj.IsNull() || !obj.IsKnown() { - return "", "" - } - - atys := obj.Type().AttributeTypes() - - switch { - - case atys["name"] == cty.String: - v := obj.GetAttr("name") - if v.IsKnown() && !v.IsNull() { - return "name", v.AsString() - } - - case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String: - tags := obj.GetAttr("tags") - if tags.IsNull() || !tags.IsWhollyKnown() { - break - } - - switch { - case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True): - v := tags.Index(cty.StringVal("name")) - if v.IsKnown() && !v.IsNull() { - return "tags.name", v.AsString() - } - case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True): - // AWS-style naming convention - v := tags.Index(cty.StringVal("Name")) - if v.IsKnown() && !v.IsNull() { - return "tags.Name", v.AsString() - } - } - } - - return "", "" -} - -// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID -// and ObjectValueName (in that preference order) to try to extract some sort -// of human-friendly descriptive string value for an object as additional -// context about an object when it is being displayed in a compact way (where -// not all of the attributes are visible.) -// -// Just as with the two functions it wraps, it is a best-effort and may return -// two empty strings if no suitable attribute can be found for a given object. -func ObjectValueIDOrName(obj cty.Value) (k, v string) { - k, v = ObjectValueID(obj) - if k != "" { - return - } - k, v = ObjectValueName(obj) - return -} diff --git a/vendor/github.com/hashicorp/terraform/command/format/state.go b/vendor/github.com/hashicorp/terraform/command/format/state.go deleted file mode 100644 index 95db8d93..00000000 --- a/vendor/github.com/hashicorp/terraform/command/format/state.go +++ /dev/null @@ -1,329 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/colorstring" -) - -// StateOpts are the options for formatting a state. -type StateOpts struct { - // State is the state to format. This is required. - State *states.State - - // Schemas are used to decode attributes. This is required. - Schemas *terraform.Schemas - - // Color is the colorizer. This is optional. - Color *colorstring.Colorize -} - -// State takes a state and returns a string -func State(opts *StateOpts) string { - if opts.Color == nil { - panic("colorize not given") - } - - if opts.Schemas == nil { - panic("schemas not given") - } - - s := opts.State - if len(s.Modules) == 0 { - return "The state file is empty. No resources are represented." 
- } - - buf := bytes.NewBufferString("[reset]") - p := blockBodyDiffPrinter{ - buf: buf, - color: opts.Color, - action: plans.NoOp, - } - - // Format all the modules - for _, m := range s.Modules { - formatStateModule(p, m, opts.Schemas) - } - - // Write the outputs for the root module - m := s.RootModule() - - if m.OutputValues != nil { - if len(m.OutputValues) > 0 { - p.buf.WriteString("Outputs:\n\n") - } - - // Sort the outputs - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - // Output each output k/v pair - for _, k := range ks { - v := m.OutputValues[k] - p.buf.WriteString(fmt.Sprintf("%s = ", k)) - p.writeValue(v.Value, plans.NoOp, 0) - p.buf.WriteString("\n") - } - } - - trimmedOutput := strings.TrimSpace(p.buf.String()) - trimmedOutput += "[reset]" - - return opts.Color.Color(trimmedOutput) - -} - -func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { - // First get the names of all the resources so we can show them - // in alphabetical order. - names := make([]string, 0, len(m.Resources)) - for name := range m.Resources { - names = append(names, name) - } - sort.Strings(names) - - // Go through each resource and begin building up the output. - for _, key := range names { - for k, v := range m.Resources[key].Instances { - // keep these in order to keep the current object first, and - // provide deterministic output for the deposed objects - type obj struct { - header string - instance *states.ResourceInstanceObjectSrc - } - instances := []obj{} - - addr := m.Resources[key].Addr - - taintStr := "" - if v.Current != nil && v.Current.Status == 'T' { - taintStr = " (tainted)" - } - - instances = append(instances, - obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current}) - - for dk, v := range v.Deposed { - instances = append(instances, - obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v}) - } - - // Sort the instances for consistent output. - // Starting the sort from the second index, so the current instance - // is always first. - sort.Slice(instances[1:], func(i, j int) bool { - return instances[i+1].header < instances[j+1].header - }) - - for _, obj := range instances { - header := obj.header - instance := obj.instance - p.buf.WriteString(header) - if instance == nil { - // this shouldn't happen, but there's nothing to do here so - // don't panic below. - continue - } - - var schema *configschema.Block - - // TODO: Get the provider FQN when it is available from the - // AbsoluteProviderConfig, in state. - // - // Check if the resource has a configured provider, otherwise - // use the default provider. - provider := m.Resources[key].ProviderConfig.ProviderConfig.Type - if _, exists := schemas.Providers[provider.LegacyString()]; !exists { - // This should never happen in normal use because we - // should've loaded all of the schemas and checked things - // prior to this point. We can't return errors here, but - // since this is UI code we will try to do _something_ - // reasonable. 
- p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider.LegacyString())) - continue - } - - switch addr.Mode { - case addrs.ManagedResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider.LegacyString(), - addr.Mode, - addr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q resource type %s\n\n", provider.LegacyString(), addr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "resource %q %q {", - addr.Type, - addr.Name, - )) - case addrs.DataResourceMode: - schema, _ = schemas.ResourceTypeConfig( - provider.LegacyString(), - addr.Mode, - addr.Type, - ) - if schema == nil { - p.buf.WriteString(fmt.Sprintf( - "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) - continue - } - - p.buf.WriteString(fmt.Sprintf( - "data %q %q {", - addr.Type, - addr.Name, - )) - default: - // should never happen, since the above is exhaustive - p.buf.WriteString(addr.String()) - } - - val, err := instance.Decode(schema.ImpliedType()) - if err != nil { - fmt.Println(err.Error()) - break - } - - path := make(cty.Path, 0, 3) - bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) - if bodyWritten { - p.buf.WriteString("\n") - } - - p.buf.WriteString("}\n\n") - } - } - } - p.buf.WriteString("\n") -} - -func formatNestedList(indent string, outputList []interface{}) string { - outputBuf := new(bytes.Buffer) - outputBuf.WriteString(fmt.Sprintf("%s[", indent)) - - lastIdx := len(outputList) - 1 - - for i, value := range outputList { - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value)) - if i != lastIdx { - outputBuf.WriteString(",") - } - } - - outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatListOutput(indent, outputName string, outputList []interface{}) string { - keyIndent := "" - - outputBuf := new(bytes.Buffer) - - if outputName != "" { - outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName)) - keyIndent = " " - } - - lastIdx := len(outputList) - 1 - - for i, value := range outputList { - switch typedValue := value.(type) { - case string: - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) - case []interface{}: - outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, - formatNestedList(indent+keyIndent, typedValue))) - case map[string]interface{}: - outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, - formatNestedMap(indent+keyIndent, typedValue))) - } - - if lastIdx != i { - outputBuf.WriteString(",") - } - } - - if outputName != "" { - if len(outputList) > 0 { - outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) - } else { - outputBuf.WriteString("]") - } - } - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatNestedMap(indent string, outputMap map[string]interface{}) string { - ks := make([]string, 0, len(outputMap)) - for k := range outputMap { - ks = append(ks, k) - } - sort.Strings(ks) - - outputBuf := new(bytes.Buffer) - outputBuf.WriteString(fmt.Sprintf("%s{", indent)) - - lastIdx := len(outputMap) - 1 - for i, k := range ks { - v := outputMap[k] - outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v)) - - if lastIdx != i { - outputBuf.WriteString(",") - } - } - - outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { - ks := make([]string, 0, len(outputMap)) - for k 
:= range outputMap { - ks = append(ks, k) - } - sort.Strings(ks) - - keyIndent := "" - - outputBuf := new(bytes.Buffer) - if outputName != "" { - outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName)) - keyIndent = " " - } - - for _, k := range ks { - v := outputMap[k] - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v)) - } - - if outputName != "" { - if len(outputMap) > 0 { - outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) - } else { - outputBuf.WriteString("}") - } - } - - return strings.TrimPrefix(outputBuf.String(), "\n") -} diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go deleted file mode 100644 index 9d80c42b..00000000 --- a/vendor/github.com/hashicorp/terraform/config/append.go +++ /dev/null @@ -1,92 +0,0 @@ -package config - -// Append appends one configuration to another. -// -// Append assumes that both configurations will not have -// conflicting variables, resources, etc. If they do, the -// problems will be caught in the validation phase. -// -// It is possible that c1, c2 on their own are not valid. For -// example, a resource in c2 may reference a variable in c1. But -// together, they would be valid. -func Append(c1, c2 *Config) (*Config, error) { - c := new(Config) - - // Append unknown keys, but keep them unique since it is a set - unknowns := make(map[string]struct{}) - for _, k := range c1.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - for _, k := range c2.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - c.Atlas = c1.Atlas - if c2.Atlas != nil { - c.Atlas = c2.Atlas - } - - // merge Terraform blocks - if c1.Terraform != nil { - c.Terraform = c1.Terraform - if c2.Terraform != nil { - c.Terraform.Merge(c2.Terraform) - } - } else { - c.Terraform = c2.Terraform - } - - if len(c1.Modules) > 0 || len(c2.Modules) > 0 { - c.Modules = make( - []*Module, 0, len(c1.Modules)+len(c2.Modules)) - c.Modules = append(c.Modules, c1.Modules...) - c.Modules = append(c.Modules, c2.Modules...) - } - - if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 { - c.Outputs = make( - []*Output, 0, len(c1.Outputs)+len(c2.Outputs)) - c.Outputs = append(c.Outputs, c1.Outputs...) - c.Outputs = append(c.Outputs, c2.Outputs...) - } - - if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 { - c.ProviderConfigs = make( - []*ProviderConfig, - 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs)) - c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...) - c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...) - } - - if len(c1.Resources) > 0 || len(c2.Resources) > 0 { - c.Resources = make( - []*Resource, - 0, len(c1.Resources)+len(c2.Resources)) - c.Resources = append(c.Resources, c1.Resources...) - c.Resources = append(c.Resources, c2.Resources...) - } - - if len(c1.Variables) > 0 || len(c2.Variables) > 0 { - c.Variables = make( - []*Variable, 0, len(c1.Variables)+len(c2.Variables)) - c.Variables = append(c.Variables, c1.Variables...) - c.Variables = append(c.Variables, c2.Variables...) - } - - if len(c1.Locals) > 0 || len(c2.Locals) > 0 { - c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) - c.Locals = append(c.Locals, c1.Locals...) - c.Locals = append(c.Locals, c2.Locals...) 
- } - - return c, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go deleted file mode 100644 index 497effef..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ /dev/null @@ -1,1171 +0,0 @@ -// The config package is responsible for loading and validating the -// configuration. -package config - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - hcl2 "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/helper/hilmapstructure" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/tfdiags" - "github.com/mitchellh/reflectwalk" -) - -// NameRegexp is the regular expression that all names (modules, providers, -// resources, etc.) must follow. -var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`) - -// Config is the configuration that comes from loading a collection -// of Terraform templates. -type Config struct { - // Dir is the path to the directory where this configuration was - // loaded from. If it is blank, this configuration wasn't loaded from - // any meaningful directory. - Dir string - - Terraform *Terraform - Atlas *AtlasConfig - Modules []*Module - ProviderConfigs []*ProviderConfig - Resources []*Resource - Variables []*Variable - Locals []*Local - Outputs []*Output - - // The fields below can be filled in by loaders for validation - // purposes. - unknownKeys []string -} - -// AtlasConfig is the configuration for building in HashiCorp's Atlas. -type AtlasConfig struct { - Name string - Include []string - Exclude []string -} - -// Module is a module used within a configuration. -// -// This does not represent a module itself, this represents a module -// call-site within an existing configuration. -type Module struct { - Name string - Source string - Version string - Providers map[string]string - RawConfig *RawConfig -} - -// ProviderConfig is the configuration for a resource provider. -// -// For example, Terraform needs to set the AWS access keys for the AWS -// resource provider. -type ProviderConfig struct { - Name string - Alias string - Version string - RawConfig *RawConfig -} - -// A resource represents a single Terraform resource in the configuration. -// A Terraform resource is something that supports some or all of the -// usual "create, read, update, delete" operations, depending on -// the given Mode. -type Resource struct { - Mode ResourceMode // which operations the resource supports - Name string - Type string - RawCount *RawConfig - RawConfig *RawConfig - Provisioners []*Provisioner - Provider string - DependsOn []string - Lifecycle ResourceLifecycle -} - -// Copy returns a copy of this Resource. Helpful for avoiding shared -// config pointers across multiple pieces of the graph that need to do -// interpolation. 
-func (r *Resource) Copy() *Resource { - n := &Resource{ - Mode: r.Mode, - Name: r.Name, - Type: r.Type, - RawCount: r.RawCount.Copy(), - RawConfig: r.RawConfig.Copy(), - Provisioners: make([]*Provisioner, 0, len(r.Provisioners)), - Provider: r.Provider, - DependsOn: make([]string, len(r.DependsOn)), - Lifecycle: *r.Lifecycle.Copy(), - } - for _, p := range r.Provisioners { - n.Provisioners = append(n.Provisioners, p.Copy()) - } - copy(n.DependsOn, r.DependsOn) - return n -} - -// ResourceLifecycle is used to store the lifecycle tuning parameters -// to allow customized behavior -type ResourceLifecycle struct { - CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` - PreventDestroy bool `mapstructure:"prevent_destroy"` - IgnoreChanges []string `mapstructure:"ignore_changes"` -} - -// Copy returns a copy of this ResourceLifecycle -func (r *ResourceLifecycle) Copy() *ResourceLifecycle { - n := &ResourceLifecycle{ - CreateBeforeDestroy: r.CreateBeforeDestroy, - PreventDestroy: r.PreventDestroy, - IgnoreChanges: make([]string, len(r.IgnoreChanges)), - } - copy(n.IgnoreChanges, r.IgnoreChanges) - return n -} - -// Provisioner is a configured provisioner step on a resource. -type Provisioner struct { - Type string - RawConfig *RawConfig - ConnInfo *RawConfig - - When ProvisionerWhen - OnFailure ProvisionerOnFailure -} - -// Copy returns a copy of this Provisioner -func (p *Provisioner) Copy() *Provisioner { - return &Provisioner{ - Type: p.Type, - RawConfig: p.RawConfig.Copy(), - ConnInfo: p.ConnInfo.Copy(), - When: p.When, - OnFailure: p.OnFailure, - } -} - -// Variable is a module argument defined within the configuration. -type Variable struct { - Name string - DeclaredType string `mapstructure:"type"` - Default interface{} - Description string -} - -// Local is a local value defined within the configuration. -type Local struct { - Name string - RawConfig *RawConfig -} - -// Output is an output defined within the configuration. An output is -// resulting data that is highlighted by Terraform when finished. An -// output marked Sensitive will be output in a masked form following -// application, but will still be available in state. -type Output struct { - Name string - DependsOn []string - Description string - Sensitive bool - RawConfig *RawConfig -} - -// VariableType is the type of value a variable is holding, and returned -// by the Type() function on variables. -type VariableType byte - -const ( - VariableTypeUnknown VariableType = iota - VariableTypeString - VariableTypeList - VariableTypeMap -) - -func (v VariableType) Printable() string { - switch v { - case VariableTypeString: - return "string" - case VariableTypeMap: - return "map" - case VariableTypeList: - return "list" - default: - return "unknown" - } -} - -// ProviderConfigName returns the name of the provider configuration in -// the given mapping that maps to the proper provider configuration -// for this resource. -func ProviderConfigName(t string, pcs []*ProviderConfig) string { - lk := "" - for _, v := range pcs { - k := v.Name - if strings.HasPrefix(t, k) && len(k) > len(lk) { - lk = k - } - } - - return lk -} - -// A unique identifier for this module. -func (r *Module) Id() string { - return fmt.Sprintf("%s", r.Name) -} - -// Count returns the count of this resource. 
-func (r *Resource) Count() (int, error) { - raw := r.RawCount.Value() - count, ok := r.RawCount.Value().(string) - if !ok { - return 0, fmt.Errorf( - "expected count to be a string or int, got %T", raw) - } - - v, err := strconv.ParseInt(count, 0, 0) - if err != nil { - return 0, fmt.Errorf( - "cannot parse %q as an integer", - count, - ) - } - - return int(v), nil -} - -// A unique identifier for this resource. -func (r *Resource) Id() string { - switch r.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", r.Type, r.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", r.Type, r.Name) - default: - panic(fmt.Errorf("unknown resource mode %s", r.Mode)) - } -} - -// Validate does some basic semantic checking of the configuration. -func (c *Config) Validate() tfdiags.Diagnostics { - if c == nil { - return nil - } - - var diags tfdiags.Diagnostics - - for _, k := range c.unknownKeys { - diags = diags.Append( - fmt.Errorf("Unknown root level key: %s", k), - ) - } - - // Validate the Terraform config - if tf := c.Terraform; tf != nil { - errs := c.Terraform.Validate() - for _, err := range errs { - diags = diags.Append(err) - } - } - - vars := c.InterpolatedVariables() - varMap := make(map[string]*Variable) - for _, v := range c.Variables { - if _, ok := varMap[v.Name]; ok { - diags = diags.Append(fmt.Errorf( - "Variable '%s': duplicate found. Variable names must be unique.", - v.Name, - )) - } - - varMap[v.Name] = v - } - - for k, _ := range varMap { - if !NameRegexp.MatchString(k) { - diags = diags.Append(fmt.Errorf( - "variable %q: variable name must match regular expression %s", - k, NameRegexp, - )) - } - } - - for _, v := range c.Variables { - if v.Type() == VariableTypeUnknown { - diags = diags.Append(fmt.Errorf( - "Variable '%s': must be a string or a map", - v.Name, - )) - continue - } - - interp := false - fn := func(n ast.Node) (interface{}, error) { - // LiteralNode is a literal string (outside of a ${ ... } sequence). - // interpolationWalker skips most of these. but in particular it - // visits those that have escaped sequences (like $${foo}) as a - // signal that *some* processing is required on this string. For - // our purposes here though, this is fine and not an interpolation. - if _, ok := n.(*ast.LiteralNode); !ok { - interp = true - } - return "", nil - } - - w := &interpolationWalker{F: fn} - if v.Default != nil { - if err := reflectwalk.Walk(v.Default, w); err == nil { - if interp { - diags = diags.Append(fmt.Errorf( - "variable %q: default may not contain interpolations", - v.Name, - )) - } - } - } - } - - // Check for references to user variables that do not actually - // exist and record those errors. - for source, vs := range vars { - for _, v := range vs { - uv, ok := v.(*UserVariable) - if !ok { - continue - } - - if _, ok := varMap[uv.Name]; !ok { - diags = diags.Append(fmt.Errorf( - "%s: unknown variable referenced: '%s'; define it with a 'variable' block", - source, - uv.Name, - )) - } - } - } - - // Check that all count variables are valid. 
- for source, vs := range vars { - for _, rawV := range vs { - switch v := rawV.(type) { - case *CountVariable: - if v.Type == CountValueInvalid { - diags = diags.Append(fmt.Errorf( - "%s: invalid count variable: %s", - source, - v.FullKey(), - )) - } - case *PathVariable: - if v.Type == PathValueInvalid { - diags = diags.Append(fmt.Errorf( - "%s: invalid path variable: %s", - source, - v.FullKey(), - )) - } - } - } - } - - // Check that providers aren't declared multiple times and that their - // version constraints, where present, are syntactically valid. - providerSet := make(map[string]bool) - for _, p := range c.ProviderConfigs { - name := p.FullName() - if _, ok := providerSet[name]; ok { - diags = diags.Append(fmt.Errorf( - "provider.%s: multiple configurations present; only one configuration is allowed per provider", - name, - )) - continue - } - - if p.Version != "" { - _, err := discovery.ConstraintStr(p.Version).Parse() - if err != nil { - diags = diags.Append(&hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid provider version constraint", - Detail: fmt.Sprintf( - "The value %q given for provider.%s is not a valid version constraint.", - p.Version, name, - ), - // TODO: include a "Subject" source reference in here, - // once the config loader is able to retain source - // location information. - }) - } - } - - providerSet[name] = true - } - - // Check that all references to modules are valid - modules := make(map[string]*Module) - dupped := make(map[string]struct{}) - for _, m := range c.Modules { - // Check for duplicates - if _, ok := modules[m.Id()]; ok { - if _, ok := dupped[m.Id()]; !ok { - dupped[m.Id()] = struct{}{} - - diags = diags.Append(fmt.Errorf( - "module %q: module repeated multiple times", - m.Id(), - )) - } - - // Already seen this module, just skip it - continue - } - - modules[m.Id()] = m - - // Check that the source has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": m.Source, - }) - if err != nil { - diags = diags.Append(fmt.Errorf( - "module %q: module source error: %s", - m.Id(), err, - )) - } else if len(rc.Interpolations) > 0 { - diags = diags.Append(fmt.Errorf( - "module %q: module source cannot contain interpolations", - m.Id(), - )) - } - - // Check that the name matches our regexp - if !NameRegexp.Match([]byte(m.Name)) { - diags = diags.Append(fmt.Errorf( - "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores", - m.Id(), - )) - } - - // Check that the configuration can all be strings, lists or maps - raw := make(map[string]interface{}) - for k, v := range m.RawConfig.Raw { - var strVal string - if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { - raw[k] = strVal - continue - } - - var mapVal map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil { - raw[k] = mapVal - continue - } - - var sliceVal []interface{} - if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil { - raw[k] = sliceVal - continue - } - - diags = diags.Append(fmt.Errorf( - "module %q: argument %s must have a string, list, or map value", - m.Id(), k, - )) - } - - // Check for invalid count variables - for _, v := range m.RawConfig.Variables { - switch v.(type) { - case *CountVariable: - diags = diags.Append(fmt.Errorf( - "module %q: count variables are only valid within resources", - m.Name, - )) - case *SelfVariable: - diags = diags.Append(fmt.Errorf( - "module %q: self variables are only valid within resources", - m.Name, - )) 
- } - } - - // Update the raw configuration to only contain the string values - m.RawConfig, err = NewRawConfig(raw) - if err != nil { - diags = diags.Append(fmt.Errorf( - "%s: can't initialize configuration: %s", - m.Id(), err, - )) - } - - // check that all named providers actually exist - for _, p := range m.Providers { - if !providerSet[p] { - diags = diags.Append(fmt.Errorf( - "module %q: cannot pass non-existent provider %q", - m.Name, p, - )) - } - } - - } - dupped = nil - - // Check that all variables for modules reference modules that - // exist. - for source, vs := range vars { - for _, v := range vs { - mv, ok := v.(*ModuleVariable) - if !ok { - continue - } - - if _, ok := modules[mv.Name]; !ok { - diags = diags.Append(fmt.Errorf( - "%s: unknown module referenced: %s", - source, mv.Name, - )) - } - } - } - - // Check that all references to resources are valid - resources := make(map[string]*Resource) - dupped = make(map[string]struct{}) - for _, r := range c.Resources { - if _, ok := resources[r.Id()]; ok { - if _, ok := dupped[r.Id()]; !ok { - dupped[r.Id()] = struct{}{} - - diags = diags.Append(fmt.Errorf( - "%s: resource repeated multiple times", - r.Id(), - )) - } - } - - resources[r.Id()] = r - } - dupped = nil - - // Validate resources - for n, r := range resources { - // Verify count variables - for _, v := range r.RawCount.Variables { - switch v.(type) { - case *CountVariable: - diags = diags.Append(fmt.Errorf( - "%s: resource count can't reference count variable: %s", - n, v.FullKey(), - )) - case *SimpleVariable: - diags = diags.Append(fmt.Errorf( - "%s: resource count can't reference variable: %s", - n, v.FullKey(), - )) - - // Good - case *ModuleVariable: - case *ResourceVariable: - case *TerraformVariable: - case *UserVariable: - case *LocalVariable: - - default: - diags = diags.Append(fmt.Errorf( - "Internal error. Unknown type in count var in %s: %T", - n, v, - )) - } - } - - if !r.RawCount.couldBeInteger() { - diags = diags.Append(fmt.Errorf( - "%s: resource count must be an integer", n, - )) - } - r.RawCount.init() - - // Validate DependsOn - for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) { - diags = diags.Append(err) - } - - // Verify provisioners - for _, p := range r.Provisioners { - // This validation checks that there are no splat variables - // referencing ourself. This currently is not allowed. - - for _, v := range p.ConnInfo.Variables { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - diags = diags.Append(fmt.Errorf( - "%s: connection info cannot contain splat variable referencing itself", - n, - )) - break - } - } - - for _, v := range p.RawConfig.Variables { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - diags = diags.Append(fmt.Errorf( - "%s: connection info cannot contain splat variable referencing itself", - n, - )) - break - } - } - - // Check for invalid when/onFailure values, though this should be - // picked up by the loader we check here just in case. 
- if p.When == ProvisionerWhenInvalid { - diags = diags.Append(fmt.Errorf( - "%s: provisioner 'when' value is invalid", n, - )) - } - if p.OnFailure == ProvisionerOnFailureInvalid { - diags = diags.Append(fmt.Errorf( - "%s: provisioner 'on_failure' value is invalid", n, - )) - } - } - - // Verify ignore_changes contains valid entries - for _, v := range r.Lifecycle.IgnoreChanges { - if strings.Contains(v, "*") && v != "*" { - diags = diags.Append(fmt.Errorf( - "%s: ignore_changes does not support using a partial string together with a wildcard: %s", - n, v, - )) - } - } - - // Verify ignore_changes has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": r.Lifecycle.IgnoreChanges, - }) - if err != nil { - diags = diags.Append(fmt.Errorf( - "%s: lifecycle ignore_changes error: %s", - n, err, - )) - } else if len(rc.Interpolations) > 0 { - diags = diags.Append(fmt.Errorf( - "%s: lifecycle ignore_changes cannot contain interpolations", - n, - )) - } - - // If it is a data source then it can't have provisioners - if r.Mode == DataResourceMode { - if _, ok := r.RawConfig.Raw["provisioner"]; ok { - diags = diags.Append(fmt.Errorf( - "%s: data sources cannot have provisioners", - n, - )) - } - } - } - - for source, vs := range vars { - for _, v := range vs { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - id := rv.ResourceId() - if _, ok := resources[id]; !ok { - diags = diags.Append(fmt.Errorf( - "%s: unknown resource '%s' referenced in variable %s", - source, - id, - rv.FullKey(), - )) - continue - } - } - } - - // Check that all locals are valid - { - found := make(map[string]struct{}) - for _, l := range c.Locals { - if _, ok := found[l.Name]; ok { - diags = diags.Append(fmt.Errorf( - "%s: duplicate local. local value names must be unique", - l.Name, - )) - continue - } - found[l.Name] = struct{}{} - - for _, v := range l.RawConfig.Variables { - if _, ok := v.(*CountVariable); ok { - diags = diags.Append(fmt.Errorf( - "local %s: count variables are only valid within resources", l.Name, - )) - } - } - } - } - - // Check that all outputs are valid - { - found := make(map[string]struct{}) - for _, o := range c.Outputs { - // Verify the output is new - if _, ok := found[o.Name]; ok { - diags = diags.Append(fmt.Errorf( - "output %q: an output of this name was already defined", - o.Name, - )) - continue - } - found[o.Name] = struct{}{} - - var invalidKeys []string - valueKeyFound := false - for k := range o.RawConfig.Raw { - if k == "value" { - valueKeyFound = true - continue - } - if k == "sensitive" { - if sensitive, ok := o.RawConfig.config[k].(bool); ok { - if sensitive { - o.Sensitive = true - } - continue - } - - diags = diags.Append(fmt.Errorf( - "output %q: value for 'sensitive' must be boolean", - o.Name, - )) - continue - } - if k == "description" { - if desc, ok := o.RawConfig.config[k].(string); ok { - o.Description = desc - continue - } - - diags = diags.Append(fmt.Errorf( - "output %q: value for 'description' must be string", - o.Name, - )) - continue - } - invalidKeys = append(invalidKeys, k) - } - if len(invalidKeys) > 0 { - diags = diags.Append(fmt.Errorf( - "output %q: invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "), - )) - } - if !valueKeyFound { - diags = diags.Append(fmt.Errorf( - "output %q: missing required 'value' argument", o.Name, - )) - } - - for _, v := range o.RawConfig.Variables { - if _, ok := v.(*CountVariable); ok { - diags = diags.Append(fmt.Errorf( - "output %q: count variables are only valid within resources", 
- o.Name, - )) - } - } - - // Detect a common mistake of using a "count"ed resource in - // an output value without using the splat or index form. - // Prior to 0.11 this error was silently ignored, but outputs - // now have their errors checked like all other contexts. - // - // TODO: Remove this in 0.12. - for _, v := range o.RawConfig.Variables { - rv, ok := v.(*ResourceVariable) - if !ok { - continue - } - - // If the variable seems to be treating the referenced - // resource as a singleton (no count specified) then - // we'll check to make sure it is indeed a singleton. - // It's a warning if not. - - if rv.Multi || rv.Index != 0 { - // This reference is treating the resource as a - // multi-resource, so the warning doesn't apply. - continue - } - - for _, r := range c.Resources { - if r.Id() != rv.ResourceId() { - continue - } - - // We test specifically for the raw string "1" here - // because we _do_ want to generate this warning if - // the user has provided an expression that happens - // to return 1 right now, to catch situations where - // a count might dynamically be set to something - // other than 1 and thus splat syntax is still needed - // to be safe. - if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { - diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( - "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", - o.Name, - r.Id(), rv.Field, - r.Id(), rv.Field, - ))) - } - } - } - } - } - - // Validate the self variable - for source, rc := range c.rawConfigs() { - // Ignore provisioners. This is a pretty brittle way to do this, - // but better than also repeating all the resources. - if strings.Contains(source, "provision") { - continue - } - - for _, v := range rc.Variables { - if _, ok := v.(*SelfVariable); ok { - diags = diags.Append(fmt.Errorf( - "%s: cannot contain self-reference %s", - source, v.FullKey(), - )) - } - } - } - - return diags -} - -// InterpolatedVariables is a helper that returns a mapping of all the interpolated -// variables within the configuration. This is used to verify references -// are valid in the Validate step. -func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable { - result := make(map[string][]InterpolatedVariable) - for source, rc := range c.rawConfigs() { - for _, v := range rc.Variables { - result[source] = append(result[source], v) - } - } - return result -} - -// rawConfigs returns all of the RawConfigs that are available keyed by -// a human-friendly source. 
-func (c *Config) rawConfigs() map[string]*RawConfig { - result := make(map[string]*RawConfig) - for _, m := range c.Modules { - source := fmt.Sprintf("module '%s'", m.Name) - result[source] = m.RawConfig - } - - for _, pc := range c.ProviderConfigs { - source := fmt.Sprintf("provider config '%s'", pc.Name) - result[source] = pc.RawConfig - } - - for _, rc := range c.Resources { - source := fmt.Sprintf("resource '%s'", rc.Id()) - result[source+" count"] = rc.RawCount - result[source+" config"] = rc.RawConfig - - for i, p := range rc.Provisioners { - subsource := fmt.Sprintf( - "%s provisioner %s (#%d)", - source, p.Type, i+1) - result[subsource] = p.RawConfig - } - } - - for _, o := range c.Outputs { - source := fmt.Sprintf("output '%s'", o.Name) - result[source] = o.RawConfig - } - - return result -} - -func (c *Config) validateDependsOn( - n string, - v []string, - resources map[string]*Resource, - modules map[string]*Module) []error { - // Verify depends on points to resources that all exist - var errs []error - for _, d := range v { - // Check if we contain interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "value": d, - }) - if err == nil && len(rc.Variables) > 0 { - errs = append(errs, fmt.Errorf( - "%s: depends on value cannot contain interpolations: %s", - n, d)) - continue - } - - // If it is a module, verify it is a module - if strings.HasPrefix(d, "module.") { - name := d[len("module."):] - if _, ok := modules[name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent module '%s'", - n, name)) - } - - continue - } - - // Check resources - if _, ok := resources[d]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent resource '%s'", - n, d)) - } - } - - return errs -} - -func (m *Module) mergerName() string { - return m.Id() -} - -func (m *Module) mergerMerge(other merger) merger { - m2 := other.(*Module) - - result := *m - result.Name = m2.Name - result.RawConfig = result.RawConfig.merge(m2.RawConfig) - - if m2.Source != "" { - result.Source = m2.Source - } - - return &result -} - -func (o *Output) mergerName() string { - return o.Name -} - -func (o *Output) mergerMerge(m merger) merger { - o2 := m.(*Output) - - result := *o - result.Name = o2.Name - result.Description = o2.Description - result.RawConfig = result.RawConfig.merge(o2.RawConfig) - result.Sensitive = o2.Sensitive - result.DependsOn = o2.DependsOn - - return &result -} - -func (c *ProviderConfig) GoString() string { - return fmt.Sprintf("*%#v", *c) -} - -func (c *ProviderConfig) FullName() string { - if c.Alias == "" { - return c.Name - } - - return fmt.Sprintf("%s.%s", c.Name, c.Alias) -} - -func (c *ProviderConfig) mergerName() string { - return c.Name -} - -func (c *ProviderConfig) mergerMerge(m merger) merger { - c2 := m.(*ProviderConfig) - - result := *c - result.Name = c2.Name - result.RawConfig = result.RawConfig.merge(c2.RawConfig) - - if c2.Alias != "" { - result.Alias = c2.Alias - } - - return &result -} - -func (r *Resource) mergerName() string { - return r.Id() -} - -func (r *Resource) mergerMerge(m merger) merger { - r2 := m.(*Resource) - - result := *r - result.Mode = r2.Mode - result.Name = r2.Name - result.Type = r2.Type - result.RawConfig = result.RawConfig.merge(r2.RawConfig) - - if r2.RawCount.Value() != "1" { - result.RawCount = r2.RawCount - } - - if len(r2.Provisioners) > 0 { - result.Provisioners = r2.Provisioners - } - - return &result -} - -// Merge merges two variables to create a new third variable. 
-func (v *Variable) Merge(v2 *Variable) *Variable { - // Shallow copy the variable - result := *v - - // The names should be the same, but the second name always wins. - result.Name = v2.Name - - if v2.DeclaredType != "" { - result.DeclaredType = v2.DeclaredType - } - if v2.Default != nil { - result.Default = v2.Default - } - if v2.Description != "" { - result.Description = v2.Description - } - - return &result -} - -var typeStringMap = map[string]VariableType{ - "string": VariableTypeString, - "map": VariableTypeMap, - "list": VariableTypeList, -} - -// Type returns the type of variable this is. -func (v *Variable) Type() VariableType { - if v.DeclaredType != "" { - declaredType, ok := typeStringMap[v.DeclaredType] - if !ok { - return VariableTypeUnknown - } - - return declaredType - } - - return v.inferTypeFromDefault() -} - -// ValidateTypeAndDefault ensures that default variable value is compatible -// with the declared type (if one exists), and that the type is one which is -// known to Terraform -func (v *Variable) ValidateTypeAndDefault() error { - // If an explicit type is declared, ensure it is valid - if v.DeclaredType != "" { - if _, ok := typeStringMap[v.DeclaredType]; !ok { - validTypes := []string{} - for k := range typeStringMap { - validTypes = append(validTypes, k) - } - return fmt.Errorf( - "Variable '%s' type must be one of [%s] - '%s' is not a valid type", - v.Name, - strings.Join(validTypes, ", "), - v.DeclaredType, - ) - } - } - - if v.DeclaredType == "" || v.Default == nil { - return nil - } - - if v.inferTypeFromDefault() != v.Type() { - return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')", - v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable()) - } - - return nil -} - -func (v *Variable) mergerName() string { - return v.Name -} - -func (v *Variable) mergerMerge(m merger) merger { - return v.Merge(m.(*Variable)) -} - -// Required tests whether a variable is required or not. -func (v *Variable) Required() bool { - return v.Default == nil -} - -// inferTypeFromDefault contains the logic for the old method of inferring -// variable types - we can also use this for validating that the declared -// type matches the type of the default value -func (v *Variable) inferTypeFromDefault() VariableType { - if v.Default == nil { - return VariableTypeString - } - - var s string - if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil { - v.Default = s - return VariableTypeString - } - - var m map[string]interface{} - if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil { - v.Default = m - return VariableTypeMap - } - - var l []interface{} - if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil { - v.Default = l - return VariableTypeList - } - - return VariableTypeUnknown -} - -func (m ResourceMode) Taintable() bool { - switch m { - case ManagedResourceMode: - return true - case DataResourceMode: - return false - default: - panic(fmt.Errorf("unsupported ResourceMode value %s", m)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go deleted file mode 100644 index a6933c2a..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ /dev/null @@ -1,378 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// TestString is a Stringer-like function that outputs a string that can -// be used to easily compare multiple Config structures in unit tests. 
-// -// This function has no practical use outside of unit tests and debugging. -func (c *Config) TestString() string { - if c == nil { - return "" - } - - var buf bytes.Buffer - if len(c.Modules) > 0 { - buf.WriteString("Modules:\n\n") - buf.WriteString(modulesStr(c.Modules)) - buf.WriteString("\n\n") - } - - if len(c.Variables) > 0 { - buf.WriteString("Variables:\n\n") - buf.WriteString(variablesStr(c.Variables)) - buf.WriteString("\n\n") - } - - if len(c.ProviderConfigs) > 0 { - buf.WriteString("Provider Configs:\n\n") - buf.WriteString(providerConfigsStr(c.ProviderConfigs)) - buf.WriteString("\n\n") - } - - if len(c.Resources) > 0 { - buf.WriteString("Resources:\n\n") - buf.WriteString(resourcesStr(c.Resources)) - buf.WriteString("\n\n") - } - - if len(c.Outputs) > 0 { - buf.WriteString("Outputs:\n\n") - buf.WriteString(outputsStr(c.Outputs)) - buf.WriteString("\n") - } - - return strings.TrimSpace(buf.String()) -} - -func terraformStr(t *Terraform) string { - result := "" - - if b := t.Backend; b != nil { - result += fmt.Sprintf("backend (%s)\n", b.Type) - - keys := make([]string, 0, len(b.RawConfig.Raw)) - for k, _ := range b.RawConfig.Raw { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - result += fmt.Sprintf(" %s\n", k) - } - } - - return strings.TrimSpace(result) -} - -func modulesStr(ms []*Module) string { - result := "" - order := make([]int, 0, len(ms)) - ks := make([]string, 0, len(ms)) - mapping := make(map[string]int) - for i, m := range ms { - k := m.Id() - ks = append(ks, k) - mapping[k] = i - } - sort.Strings(ks) - for _, k := range ks { - order = append(order, mapping[k]) - } - - for _, i := range order { - m := ms[i] - result += fmt.Sprintf("%s\n", m.Id()) - - ks := make([]string, 0, len(m.RawConfig.Raw)) - for k, _ := range m.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - result += fmt.Sprintf(" source = %s\n", m.Source) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - } - - return strings.TrimSpace(result) -} - -func outputsStr(os []*Output) string { - ns := make([]string, 0, len(os)) - m := make(map[string]*Output) - for _, o := range os { - ns = append(ns, o.Name) - m[o.Name] = o - } - sort.Strings(ns) - - result := "" - for _, n := range ns { - o := m[n] - - result += fmt.Sprintf("%s\n", n) - - if len(o.DependsOn) > 0 { - result += fmt.Sprintf(" dependsOn\n") - for _, d := range o.DependsOn { - result += fmt.Sprintf(" %s\n", d) - } - } - - if len(o.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range o.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - - if o.Description != "" { - result += fmt.Sprintf(" description\n %s\n", o.Description) - } - } - - return strings.TrimSpace(result) -} - -func localsStr(ls []*Local) string { - ns := make([]string, 0, len(ls)) - m := make(map[string]*Local) - for _, l := range ls { - ns = append(ns, l.Name) - m[l.Name] = l - } - sort.Strings(ns) - - result := "" - for _, n := range ns { - l := m[n] - - result += fmt.Sprintf("%s\n", n) - - if len(l.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range l.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", 
kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a provider configs field into a deterministic -// string value for comparison in tests. -func providerConfigsStr(pcs []*ProviderConfig) string { - result := "" - - ns := make([]string, 0, len(pcs)) - m := make(map[string]*ProviderConfig) - for _, n := range pcs { - ns = append(ns, n.Name) - m[n.Name] = n - } - sort.Strings(ns) - - for _, n := range ns { - pc := m[n] - - result += fmt.Sprintf("%s\n", n) - - keys := make([]string, 0, len(pc.RawConfig.Raw)) - for k, _ := range pc.RawConfig.Raw { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - result += fmt.Sprintf(" %s\n", k) - } - - if len(pc.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - for _, rawV := range pc.RawConfig.Variables { - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a resources field into a deterministic -// string value for comparison in tests. -func resourcesStr(rs []*Resource) string { - result := "" - order := make([]int, 0, len(rs)) - ks := make([]string, 0, len(rs)) - mapping := make(map[string]int) - for i, r := range rs { - k := r.Id() - ks = append(ks, k) - mapping[k] = i - } - sort.Strings(ks) - for _, k := range ks { - order = append(order, mapping[k]) - } - - for _, i := range order { - r := rs[i] - result += fmt.Sprintf( - "%s (x%s)\n", - r.Id(), - r.RawCount.Value()) - - ks := make([]string, 0, len(r.RawConfig.Raw)) - for k, _ := range r.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - - if len(r.Provisioners) > 0 { - result += fmt.Sprintf(" provisioners\n") - for _, p := range r.Provisioners { - when := "" - if p.When != ProvisionerWhenCreate { - when = fmt.Sprintf(" (%s)", p.When.String()) - } - - result += fmt.Sprintf(" %s%s\n", p.Type, when) - - if p.OnFailure != ProvisionerOnFailureFail { - result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String()) - } - - ks := make([]string, 0, len(p.RawConfig.Raw)) - for k, _ := range p.RawConfig.Raw { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - result += fmt.Sprintf(" %s\n", k) - } - } - } - - if len(r.DependsOn) > 0 { - result += fmt.Sprintf(" dependsOn\n") - for _, d := range r.DependsOn { - result += fmt.Sprintf(" %s\n", d) - } - } - - if len(r.RawConfig.Variables) > 0 { - result += fmt.Sprintf(" vars\n") - - ks := make([]string, 0, len(r.RawConfig.Variables)) - for k, _ := range r.RawConfig.Variables { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - rawV := r.RawConfig.Variables[k] - kind := "unknown" - str := rawV.FullKey() - - switch rawV.(type) { - case *ResourceVariable: - kind = "resource" - case *UserVariable: - kind = "user" - } - - result += fmt.Sprintf(" %s: %s\n", kind, str) - } - } - } - - return strings.TrimSpace(result) -} - -// This helper turns a variables field into a deterministic -// string value for comparison in tests. 
-func variablesStr(vs []*Variable) string { - result := "" - ks := make([]string, 0, len(vs)) - m := make(map[string]*Variable) - for _, v := range vs { - ks = append(ks, v.Name) - m[v.Name] = v - } - sort.Strings(ks) - - for _, k := range ks { - v := m[k] - - required := "" - if v.Required() { - required = " (required)" - } - - declaredType := "" - if v.DeclaredType != "" { - declaredType = fmt.Sprintf(" (%s)", v.DeclaredType) - } - - if v.Default == nil || v.Default == "" { - v.Default = "<>" - } - if v.Description == "" { - v.Description = "<>" - } - - result += fmt.Sprintf( - "%s%s%s\n %v\n %s\n", - k, - required, - declaredType, - v.Default, - v.Description) - } - - return strings.TrimSpace(result) -} diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go deleted file mode 100644 index 8535c964..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config_terraform.go +++ /dev/null @@ -1,117 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-version" - "github.com/mitchellh/hashstructure" -) - -// Terraform is the Terraform meta-configuration that can be present -// in configuration files for configuring Terraform itself. -type Terraform struct { - RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint) - Backend *Backend // See Backend struct docs -} - -// Validate performs the validation for just the Terraform configuration. -func (t *Terraform) Validate() []error { - var errs []error - - if raw := t.RequiredVersion; raw != "" { - // Check that the value has no interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "root": raw, - }) - if err != nil { - errs = append(errs, fmt.Errorf( - "terraform.required_version: %s", err)) - } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "terraform.required_version: cannot contain interpolations")) - } else { - // Check it is valid - _, err := version.NewConstraint(raw) - if err != nil { - errs = append(errs, fmt.Errorf( - "terraform.required_version: invalid syntax: %s", err)) - } - } - } - - if t.Backend != nil { - errs = append(errs, t.Backend.Validate()...) - } - - return errs -} - -// Merge t with t2. -// Any conflicting fields are overwritten by t2. -func (t *Terraform) Merge(t2 *Terraform) { - if t2.RequiredVersion != "" { - t.RequiredVersion = t2.RequiredVersion - } - - if t2.Backend != nil { - t.Backend = t2.Backend - } -} - -// Backend is the configuration for the "backend" to use with Terraform. -// A backend is responsible for all major behavior of Terraform's core. -// The abstraction layer above the core (the "backend") allows for behavior -// such as remote operation. -type Backend struct { - Type string - RawConfig *RawConfig - - // Hash is a unique hash code representing the original configuration - // of the backend. This won't be recomputed unless Rehash is called. - Hash uint64 -} - -// Rehash returns a unique content hash for this backend's configuration -// as a uint64 value. -func (b *Backend) Rehash() uint64 { - // If we have no backend, the value is zero - if b == nil { - return 0 - } - - // Use hashstructure to hash only our type with the config. - code, err := hashstructure.Hash(map[string]interface{}{ - "type": b.Type, - "config": b.RawConfig.Raw, - }, nil) - - // This should never happen since we have just some basic primitives - // so panic if there is an error. 
- if err != nil { - panic(err) - } - - return code -} - -func (b *Backend) Validate() []error { - if len(b.RawConfig.Interpolations) > 0 { - return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))} - } - - return nil -} - -const errBackendInterpolations = ` -terraform.backend: configuration cannot contain interpolations - -The backend configuration is loaded by Terraform extremely early, before -the core of Terraform can be initialized. This is necessary because the backend -dictates the behavior of that core. The core is what handles interpolation -processing. Because of this, interpolations cannot be used in backend -configuration. - -If you'd like to parameterize backend configuration, we recommend using -partial configuration with the "-backend-config" flag to "terraform init". -` diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go deleted file mode 100644 index 08dc0fe9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/config_tree.go +++ /dev/null @@ -1,43 +0,0 @@ -package config - -// configTree represents a tree of configurations where the root is the -// first file and its children are the configurations it has imported. -type configTree struct { - Path string - Config *Config - Children []*configTree -} - -// Flatten flattens the entire tree down to a single merged Config -// structure. -func (t *configTree) Flatten() (*Config, error) { - // No children is easy: we're already merged! - if len(t.Children) == 0 { - return t.Config, nil - } - - // Depth-first, merge all the children first. - childConfigs := make([]*Config, len(t.Children)) - for i, ct := range t.Children { - c, err := ct.Flatten() - if err != nil { - return nil, err - } - - childConfigs[i] = c - } - - // Merge all the children in order - config := childConfigs[0] - childConfigs = childConfigs[1:] - for _, config2 := range childConfigs { - var err error - config, err = Merge(config, config2) - if err != nil { - return nil, err - } - } - - // Merge the final merged child config with our own - return Merge(config, t.Config) -} diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go deleted file mode 100644 index 08cbc773..00000000 --- a/vendor/github.com/hashicorp/terraform/config/import_tree.go +++ /dev/null @@ -1,151 +0,0 @@ -package config - -import ( - "bufio" - "fmt" - "io" - "os" - - "github.com/hashicorp/errwrap" -) - -// configurable is an interface that must be implemented by any configuration -// formats of Terraform in order to return a *Config. -type configurable interface { - Config() (*Config, error) -} - -// importTree is the result of the first-pass load of the configuration -// files. It is a tree of raw configurables and then any children (their -// imports). -// -// An importTree can be turned into a configTree. -type importTree struct { - Path string - Raw configurable - Children []*importTree -} - -// This is the function type that must be implemented by the configuration -// file loader to turn a single file into a configurable and any additional -// imports. -type fileLoaderFunc func(path string) (configurable, []string, error) - -// Set this to a non-empty value at link time to enable the HCL2 experiment. -// This is not currently enabled for release builds. 
-// -// For example: -// go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform -var enableHCL2Experiment = "" - -// loadTree takes a single file and loads the entire importTree for that -// file. This function detects what kind of configuration file it is an -// executes the proper fileLoaderFunc. -func loadTree(root string) (*importTree, error) { - var f fileLoaderFunc - - // HCL2 experiment is currently activated at build time via the linker. - // See the comment on this variable for more information. - if enableHCL2Experiment == "" { - // Main-line behavior: always use the original HCL parser - switch ext(root) { - case ".tf", ".tf.json": - f = loadFileHcl - default: - } - } else { - // Experimental behavior: use the HCL2 parser if the opt-in comment - // is present. - switch ext(root) { - case ".tf": - // We need to sniff the file for the opt-in comment line to decide - // if the file is participating in the HCL2 experiment. - cf, err := os.Open(root) - if err != nil { - return nil, err - } - sc := bufio.NewScanner(cf) - for sc.Scan() { - if sc.Text() == "#terraform:hcl2" { - f = globalHCL2Loader.loadFile - } - } - if f == nil { - f = loadFileHcl - } - case ".tf.json": - f = loadFileHcl - default: - } - } - - if f == nil { - return nil, fmt.Errorf( - "%s: unknown configuration format. Use '.tf' or '.tf.json' extension", - root) - } - - c, imps, err := f(root) - if err != nil { - return nil, err - } - - children := make([]*importTree, len(imps)) - for i, imp := range imps { - t, err := loadTree(imp) - if err != nil { - return nil, err - } - - children[i] = t - } - - return &importTree{ - Path: root, - Raw: c, - Children: children, - }, nil -} - -// Close releases any resources we might be holding open for the importTree. -// -// This can safely be called even while ConfigTree results are alive. The -// importTree is not bound to these. -func (t *importTree) Close() error { - if c, ok := t.Raw.(io.Closer); ok { - c.Close() - } - for _, ct := range t.Children { - ct.Close() - } - - return nil -} - -// ConfigTree traverses the importTree and turns each node into a *Config -// object, ultimately returning a *configTree. -func (t *importTree) ConfigTree() (*configTree, error) { - config, err := t.Raw.Config() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err) - } - - // Build our result - result := &configTree{ - Path: t.Path, - Config: config, - } - - // Build the config trees for the children - result.Children = make([]*configTree, len(t.Children)) - for i, ct := range t.Children { - t, err := ct.ConfigTree() - if err != nil { - return nil, err - } - - result.Children[i] = t - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go deleted file mode 100644 index 599e5ecd..00000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate.go +++ /dev/null @@ -1,439 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/hil/ast" -) - -// An InterpolatedVariable is a variable reference within an interpolation. -// -// Implementations of this interface represents various sources where -// variables can come from: user variables, resources, etc. 
-type InterpolatedVariable interface { - FullKey() string - SourceRange() tfdiags.SourceRange -} - -// varRange can be embedded into an InterpolatedVariable implementation to -// implement the SourceRange method. -type varRange struct { - rng tfdiags.SourceRange -} - -func (r varRange) SourceRange() tfdiags.SourceRange { - return r.rng -} - -func makeVarRange(rng tfdiags.SourceRange) varRange { - return varRange{rng} -} - -// CountVariable is a variable for referencing information about -// the count. -type CountVariable struct { - Type CountValueType - key string - varRange -} - -// CountValueType is the type of the count variable that is referenced. -type CountValueType byte - -const ( - CountValueInvalid CountValueType = iota - CountValueIndex -) - -// A ModuleVariable is a variable that is referencing the output -// of a module, such as "${module.foo.bar}" -type ModuleVariable struct { - Name string - Field string - key string - varRange -} - -// A PathVariable is a variable that references path information about the -// module. -type PathVariable struct { - Type PathValueType - key string - varRange -} - -type PathValueType byte - -const ( - PathValueInvalid PathValueType = iota - PathValueCwd - PathValueModule - PathValueRoot -) - -// A ResourceVariable is a variable that is referencing the field -// of a resource, such as "${aws_instance.foo.ami}" -type ResourceVariable struct { - Mode ResourceMode - Type string // Resource type, i.e. "aws_instance" - Name string // Resource name - Field string // Resource field - - Multi bool // True if multi-variable: aws_instance.foo.*.id - Index int // Index for multi-variable: aws_instance.foo.1.id == 1 - - key string - varRange -} - -// SelfVariable is a variable that is referencing the same resource -// it is running on: "${self.address}" -type SelfVariable struct { - Field string - - key string - varRange -} - -// SimpleVariable is an unprefixed variable, which can show up when users have -// strings they are passing down to resources that use interpolation -// internally. The template_file resource is an example of this. -type SimpleVariable struct { - Key string - varRange -} - -// TerraformVariable is a "terraform."-prefixed variable used to access -// metadata about the Terraform run. -type TerraformVariable struct { - Field string - key string - varRange -} - -// A UserVariable is a variable that is referencing a user variable -// that is inputted from outside the configuration. This looks like -// "${var.foo}" -type UserVariable struct { - Name string - Elem string - - key string - varRange -} - -// A LocalVariable is a variable that references a local value defined within -// the current module, via a "locals" block. This looks like "${local.foo}". 
-type LocalVariable struct { - Name string - varRange -} - -func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { - if strings.HasPrefix(v, "count.") { - return NewCountVariable(v) - } else if strings.HasPrefix(v, "path.") { - return NewPathVariable(v) - } else if strings.HasPrefix(v, "self.") { - return NewSelfVariable(v) - } else if strings.HasPrefix(v, "terraform.") { - return NewTerraformVariable(v) - } else if strings.HasPrefix(v, "var.") { - return NewUserVariable(v) - } else if strings.HasPrefix(v, "local.") { - return NewLocalVariable(v) - } else if strings.HasPrefix(v, "module.") { - return NewModuleVariable(v) - } else if !strings.ContainsRune(v, '.') { - return NewSimpleVariable(v) - } else { - return NewResourceVariable(v) - } -} - -func NewCountVariable(key string) (*CountVariable, error) { - var fieldType CountValueType - parts := strings.SplitN(key, ".", 2) - switch parts[1] { - case "index": - fieldType = CountValueIndex - } - - return &CountVariable{ - Type: fieldType, - key: key, - }, nil -} - -func (c *CountVariable) FullKey() string { - return c.key -} - -func NewModuleVariable(key string) (*ModuleVariable, error) { - parts := strings.SplitN(key, ".", 3) - if len(parts) < 3 { - return nil, fmt.Errorf( - "%s: module variables must be three parts: module.name.attr", - key) - } - - return &ModuleVariable{ - Name: parts[1], - Field: parts[2], - key: key, - }, nil -} - -func (v *ModuleVariable) FullKey() string { - return v.key -} - -func (v *ModuleVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewPathVariable(key string) (*PathVariable, error) { - var fieldType PathValueType - parts := strings.SplitN(key, ".", 2) - switch parts[1] { - case "cwd": - fieldType = PathValueCwd - case "module": - fieldType = PathValueModule - case "root": - fieldType = PathValueRoot - } - - return &PathVariable{ - Type: fieldType, - key: key, - }, nil -} - -func (v *PathVariable) FullKey() string { - return v.key -} - -func NewResourceVariable(key string) (*ResourceVariable, error) { - var mode ResourceMode - var parts []string - if strings.HasPrefix(key, "data.") { - mode = DataResourceMode - parts = strings.SplitN(key, ".", 4) - if len(parts) < 4 { - return nil, fmt.Errorf( - "%s: data variables must be four parts: data.TYPE.NAME.ATTR", - key) - } - - // Don't actually need the "data." prefix for parsing, since it's - // always constant. 
- parts = parts[1:] - } else { - mode = ManagedResourceMode - parts = strings.SplitN(key, ".", 3) - if len(parts) < 3 { - return nil, fmt.Errorf( - "%s: resource variables must be three parts: TYPE.NAME.ATTR", - key) - } - } - - field := parts[2] - multi := false - var index int - - if idx := strings.Index(field, "."); idx != -1 { - indexStr := field[:idx] - multi = indexStr == "*" - index = -1 - - if !multi { - indexInt, err := strconv.ParseInt(indexStr, 0, 0) - if err == nil { - multi = true - index = int(indexInt) - } - } - - if multi { - field = field[idx+1:] - } - } - - return &ResourceVariable{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Field: field, - Multi: multi, - Index: index, - key: key, - }, nil -} - -func (v *ResourceVariable) ResourceId() string { - switch v.Mode { - case ManagedResourceMode: - return fmt.Sprintf("%s.%s", v.Type, v.Name) - case DataResourceMode: - return fmt.Sprintf("data.%s.%s", v.Type, v.Name) - default: - panic(fmt.Errorf("unknown resource mode %s", v.Mode)) - } -} - -func (v *ResourceVariable) FullKey() string { - return v.key -} - -func NewSelfVariable(key string) (*SelfVariable, error) { - field := key[len("self."):] - - return &SelfVariable{ - Field: field, - - key: key, - }, nil -} - -func (v *SelfVariable) FullKey() string { - return v.key -} - -func (v *SelfVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{Key: key}, nil -} - -func (v *SimpleVariable) FullKey() string { - return v.Key -} - -func (v *SimpleVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewTerraformVariable(key string) (*TerraformVariable, error) { - field := key[len("terraform."):] - return &TerraformVariable{ - Field: field, - key: key, - }, nil -} - -func (v *TerraformVariable) FullKey() string { - return v.key -} - -func (v *TerraformVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewUserVariable(key string) (*UserVariable, error) { - name := key[len("var."):] - elem := "" - if idx := strings.Index(name, "."); idx > -1 { - elem = name[idx+1:] - name = name[:idx] - } - - if len(elem) > 0 { - return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem) - } - - return &UserVariable{ - key: key, - - Name: name, - Elem: elem, - }, nil -} - -func (v *UserVariable) FullKey() string { - return v.key -} - -func (v *UserVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -func NewLocalVariable(key string) (*LocalVariable, error) { - name := key[len("local."):] - if idx := strings.Index(name, "."); idx > -1 { - return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name) - } - - return &LocalVariable{ - Name: name, - }, nil -} - -func (v *LocalVariable) FullKey() string { - return fmt.Sprintf("local.%s", v.Name) -} - -func (v *LocalVariable) GoString() string { - return fmt.Sprintf("*%#v", *v) -} - -// DetectVariables takes an AST root and returns all the interpolated -// variables that are detected in the AST tree. 
-func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { - var result []InterpolatedVariable - var resultErr error - - // Visitor callback - fn := func(n ast.Node) ast.Node { - if resultErr != nil { - return n - } - - switch vn := n.(type) { - case *ast.VariableAccess: - v, err := NewInterpolatedVariable(vn.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - case *ast.Index: - if va, ok := vn.Target.(*ast.VariableAccess); ok { - v, err := NewInterpolatedVariable(va.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - } - if va, ok := vn.Key.(*ast.VariableAccess); ok { - v, err := NewInterpolatedVariable(va.Name) - if err != nil { - resultErr = err - return n - } - result = append(result, v) - } - default: - return n - } - - return n - } - - // Visitor pattern - root.Accept(fn) - - if resultErr != nil { - return nil, resultErr - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go deleted file mode 100644 index 1f3f67b9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ /dev/null @@ -1,54 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -// stringSliceToVariableValue converts a string slice into the value -// required to be returned from interpolation functions which return -// TypeList. -func stringSliceToVariableValue(values []string) []ast.Variable { - output := make([]ast.Variable, len(values)) - for index, value := range values { - output[index] = ast.Variable{ - Type: ast.TypeString, - Value: value, - } - } - return output -} - -// listVariableSliceToVariableValue converts a list of lists into the value -// required to be returned from interpolation functions which return TypeList. -func listVariableSliceToVariableValue(values [][]ast.Variable) []ast.Variable { - output := make([]ast.Variable, len(values)) - - for index, value := range values { - output[index] = ast.Variable{ - Type: ast.TypeList, - Value: value, - } - } - return output -} - -func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { - output := make([]string, len(values)) - for index, value := range values { - if value.Type != ast.TypeString { - return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String()) - } - output[index] = value.Value.(string) - } - return output, nil -} - -// Funcs used to return a mapping of built-in functions for configuration. -// -// However, these function implementations are no longer used. To find the -// current function implementations, refer to ../lang/functions.go instead. -func Funcs() map[string]ast.Function { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go deleted file mode 100644 index f152d800..00000000 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ /dev/null @@ -1,282 +0,0 @@ -package config - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/mitchellh/reflectwalk" -) - -// interpolationWalker implements interfaces for the reflectwalk package -// (github.com/mitchellh/reflectwalk) that can be used to automatically -// execute a callback for an interpolation. 
-type interpolationWalker struct { - // F is the function to call for every interpolation. It can be nil. - // - // If Replace is true, then the return value of F will be used to - // replace the interpolation. - F interpolationWalkerFunc - Replace bool - - // ContextF is an advanced version of F that also receives the - // location of where it is in the structure. This lets you do - // context-aware validation. - ContextF interpolationWalkerContextFunc - - key []string - lastValue reflect.Value - loc reflectwalk.Location - cs []reflect.Value - csKey []reflect.Value - csData interface{} - sliceIndex []int - unknownKeys []string -} - -// interpolationWalkerFunc is the callback called by interpolationWalk. -// It is called with any interpolation found. It should return a value -// to replace the interpolation with, along with any errors. -// -// If Replace is set to false in interpolationWalker, then the replace -// value can be anything as it will have no effect. -type interpolationWalkerFunc func(ast.Node) (interface{}, error) - -// interpolationWalkerContextFunc is called by interpolationWalk if -// ContextF is set. This receives both the interpolation and the location -// where the interpolation is. -// -// This callback can be used to validate the location of the interpolation -// within the configuration. -type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node) - -func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { - w.loc = loc - return nil -} - -func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { - w.loc = reflectwalk.None - - switch loc { - case reflectwalk.Map: - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - w.key = w.key[:len(w.key)-1] - w.csKey = w.csKey[:len(w.csKey)-1] - case reflectwalk.Slice: - // Split any values that need to be split - w.splitSlice() - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.SliceElem: - w.csKey = w.csKey[:len(w.csKey)-1] - w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1] - } - - return nil -} - -func (w *interpolationWalker) Map(m reflect.Value) error { - w.cs = append(w.cs, m) - return nil -} - -func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { - w.csData = k - w.csKey = append(w.csKey, k) - - if l := len(w.sliceIndex); l > 0 { - w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String())) - } else { - w.key = append(w.key, k.String()) - } - - w.lastValue = v - return nil -} - -func (w *interpolationWalker) Slice(s reflect.Value) error { - w.cs = append(w.cs, s) - return nil -} - -func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { - w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = append(w.sliceIndex, i) - return nil -} - -func (w *interpolationWalker) Primitive(v reflect.Value) error { - setV := v - - // We only care about strings - if v.Kind() == reflect.Interface { - setV = v - v = v.Elem() - } - if v.Kind() != reflect.String { - return nil - } - - astRoot, err := hil.Parse(v.String()) - if err != nil { - return err - } - - // If the AST we got is just a literal string value with the same - // value then we ignore it. We have to check if its the same value - // because it is possible to input a string, get out a string, and - // have it be different. 
For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.ContextF != nil { - w.ContextF(w.loc, astRoot) - } - - if w.F == nil { - return nil - } - - replaceVal, err := w.F(astRoot) - if err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if w.Replace { - // We need to determine if we need to remove this element - // if the result contains any "UnknownVariableValue" which is - // set if it is computed. This behavior is different if we're - // splitting (in a SliceElem) or not. - remove := false - if w.loc == reflectwalk.SliceElem { - switch typedReplaceVal := replaceVal.(type) { - case string: - if typedReplaceVal == hcl2shim.UnknownVariableValue { - remove = true - } - case []interface{}: - if hasUnknownValue(typedReplaceVal) { - remove = true - } - } - } else if replaceVal == hcl2shim.UnknownVariableValue { - remove = true - } - - if remove { - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - } - - resultVal := reflect.ValueOf(replaceVal) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. - m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - // if we don't have at least 2 values, we're not going to find a map, but - // we could panic. - if len(w.cs) < 2 { - return - } - - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func hasUnknownValue(variable []interface{}) bool { - for _, value := range variable { - if strVal, ok := value.(string); ok { - if strVal == hcl2shim.UnknownVariableValue { - return true - } - } - } - return false -} - -func (w *interpolationWalker) splitSlice() { - raw := w.cs[len(w.cs)-1] - - var s []interface{} - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - } - - split := false - for _, val := range s { - if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList { - split = true - } - if _, ok := val.([]interface{}); ok { - split = true - } - } - - if !split { - return - } - - result := make([]interface{}, 0) - for _, v := range s { - switch val := v.(type) { - case ast.Variable: - switch val.Type { - case ast.TypeList: - elements := val.Value.([]ast.Variable) - for _, element := range elements { - result = append(result, element.Value) - } - default: - result = append(result, val.Value) - } - case []interface{}: - result = append(result, val...) 
- default: - result = append(result, v) - } - } - - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go deleted file mode 100644 index 890d30be..00000000 --- a/vendor/github.com/hashicorp/terraform/config/lang.go +++ /dev/null @@ -1,11 +0,0 @@ -package config - -import ( - "github.com/hashicorp/hil/ast" -) - -type noopNode struct{} - -func (n *noopNode) Accept(ast.Visitor) ast.Node { return n } -func (n *noopNode) Pos() ast.Pos { return ast.Pos{} } -func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go deleted file mode 100644 index 612e25b9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ /dev/null @@ -1,212 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/hashicorp/hcl" -) - -// ErrNoConfigsFound is the error returned by LoadDir if no -// Terraform configuration files were found in the given directory. -type ErrNoConfigsFound struct { - Dir string -} - -func (e ErrNoConfigsFound) Error() string { - return fmt.Sprintf( - "No Terraform configuration files found in directory: %s", - e.Dir) -} - -// LoadJSON loads a single Terraform configuration from a given JSON document. -// -// The document must be a complete Terraform configuration. This function will -// NOT try to load any additional modules so only the given document is loaded. -func LoadJSON(raw json.RawMessage) (*Config, error) { - obj, err := hcl.Parse(string(raw)) - if err != nil { - return nil, fmt.Errorf( - "Error parsing JSON document as HCL: %s", err) - } - - // Start building the result - hclConfig := &hclConfigurable{ - Root: obj, - } - - return hclConfig.Config() -} - -// LoadFile loads the Terraform configuration from a given file. -// -// This file can be any format that Terraform recognizes, and import any -// other format that Terraform recognizes. -func LoadFile(path string) (*Config, error) { - importTree, err := loadTree(path) - if err != nil { - return nil, err - } - - configTree, err := importTree.ConfigTree() - - // Close the importTree now so that we can clear resources as quickly - // as possible. - importTree.Close() - - if err != nil { - return nil, err - } - - return configTree.Flatten() -} - -// LoadDir loads all the Terraform configuration files in a single -// directory and appends them together. -// -// Special files known as "override files" can also be present, which -// are merged into the loaded configuration. That is, the non-override -// files are loaded first to create the configuration. Then, the overrides -// are merged into the configuration to create the final configuration. -// -// Files are loaded in lexical order. -func LoadDir(root string) (*Config, error) { - files, overrides, err := dirFiles(root) - if err != nil { - return nil, err - } - if len(files) == 0 && len(overrides) == 0 { - return nil, &ErrNoConfigsFound{Dir: root} - } - - // Determine the absolute path to the directory. - rootAbs, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - var result *Config - - // Sort the files and overrides so we have a deterministic order - sort.Strings(files) - sort.Strings(overrides) - - // Load all the regular files, append them to each other. 
- for _, f := range files { - c, err := LoadFile(f) - if err != nil { - return nil, err - } - - if result != nil { - result, err = Append(result, c) - if err != nil { - return nil, err - } - } else { - result = c - } - } - if len(files) == 0 { - result = &Config{} - } - - // Load all the overrides, and merge them into the config - for _, f := range overrides { - c, err := LoadFile(f) - if err != nil { - return nil, err - } - - result, err = Merge(result, c) - if err != nil { - return nil, err - } - } - - // Mark the directory - result.Dir = rootAbs - - return result, nil -} - -// Ext returns the Terraform configuration extension of the given -// path, or a blank string if it is an invalid function. -func ext(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -func dirFiles(dir string) ([]string, []string, error) { - f, err := os.Open(dir) - if err != nil { - return nil, nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, nil, err - } - if !fi.IsDir() { - return nil, nil, fmt.Errorf( - "configuration path must be a directory: %s", - dir) - } - - var files, overrides []string - err = nil - for err != io.EOF { - var fis []os.FileInfo - fis, err = f.Readdir(128) - if err != nil && err != io.EOF { - return nil, nil, err - } - - for _, fi := range fis { - // Ignore directories - if fi.IsDir() { - continue - } - - // Only care about files that are valid to load - name := fi.Name() - extValue := ext(name) - if extValue == "" || IsIgnoredFile(name) { - continue - } - - // Determine if we're dealing with an override - nameNoExt := name[:len(name)-len(extValue)] - override := nameNoExt == "override" || - strings.HasSuffix(nameNoExt, "_override") - - path := filepath.Join(dir, name) - if override { - overrides = append(overrides, path) - } else { - files = append(files, path) - } - } - } - - return files, overrides, nil -} - -// IsIgnoredFile returns true or false depending on whether the -// provided file name is a file that should be ignored. -func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go deleted file mode 100644 index 68cffe2c..00000000 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go +++ /dev/null @@ -1,1270 +0,0 @@ -package config - -import ( - "fmt" - "io/ioutil" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" -) - -// hclConfigurable is an implementation of configurable that knows -// how to turn HCL configuration into a *Config object. 
-type hclConfigurable struct { - File string - Root *ast.File -} - -var ReservedDataSourceFields = []string{ - "connection", - "count", - "depends_on", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedResourceFields = []string{ - "connection", - "count", - "depends_on", - "id", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedProviderFields = []string{ - "alias", - "version", -} - -func (t *hclConfigurable) Config() (*Config, error) { - validKeys := map[string]struct{}{ - "atlas": struct{}{}, - "data": struct{}{}, - "locals": struct{}{}, - "module": struct{}{}, - "output": struct{}{}, - "provider": struct{}{}, - "resource": struct{}{}, - "terraform": struct{}{}, - "variable": struct{}{}, - } - - // Top-level item should be the object list - list, ok := t.Root.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("error parsing: file doesn't contain a root object") - } - - // Start building up the actual configuration. - config := new(Config) - - // Terraform config - if o := list.Filter("terraform"); len(o.Items) > 0 { - var err error - config.Terraform, err = loadTerraformHcl(o) - if err != nil { - return nil, err - } - } - - // Build the variables - if vars := list.Filter("variable"); len(vars.Items) > 0 { - var err error - config.Variables, err = loadVariablesHcl(vars) - if err != nil { - return nil, err - } - } - - // Build local values - if locals := list.Filter("locals"); len(locals.Items) > 0 { - var err error - config.Locals, err = loadLocalsHcl(locals) - if err != nil { - return nil, err - } - } - - // Get Atlas configuration - if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { - var err error - config.Atlas, err = loadAtlasHcl(atlas) - if err != nil { - return nil, err - } - } - - // Build the modules - if modules := list.Filter("module"); len(modules.Items) > 0 { - var err error - config.Modules, err = loadModulesHcl(modules) - if err != nil { - return nil, err - } - } - - // Build the provider configs - if providers := list.Filter("provider"); len(providers.Items) > 0 { - var err error - config.ProviderConfigs, err = loadProvidersHcl(providers) - if err != nil { - return nil, err - } - } - - // Build the resources - { - var err error - managedResourceConfigs := list.Filter("resource") - dataResourceConfigs := list.Filter("data") - - config.Resources = make( - []*Resource, 0, - len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items), - ) - - managedResources, err := loadManagedResourcesHcl(managedResourceConfigs) - if err != nil { - return nil, err - } - dataResources, err := loadDataResourcesHcl(dataResourceConfigs) - if err != nil { - return nil, err - } - - config.Resources = append(config.Resources, dataResources...) - config.Resources = append(config.Resources, managedResources...) - } - - // Build the outputs - if outputs := list.Filter("output"); len(outputs.Items) > 0 { - var err error - config.Outputs, err = loadOutputsHcl(outputs) - if err != nil { - return nil, err - } - } - - // Check for invalid keys - for _, item := range list.Items { - if len(item.Keys) == 0 { - // Not sure how this would happen, but let's avoid a panic - continue - } - - k := item.Keys[0].Token.Value().(string) - if _, ok := validKeys[k]; ok { - continue - } - - config.unknownKeys = append(config.unknownKeys, k) - } - - return config, nil -} - -// loadFileHcl is a fileLoaderFunc that knows how to read HCL -// files and turn them into hclConfigurables. 
-func loadFileHcl(root string) (configurable, []string, error) { - // Read the HCL file and prepare for parsing - d, err := ioutil.ReadFile(root) - if err != nil { - return nil, nil, fmt.Errorf( - "Error reading %s: %s", root, err) - } - - // Parse it - hclRoot, err := hcl.Parse(string(d)) - if err != nil { - return nil, nil, fmt.Errorf( - "Error parsing %s: %s", root, err) - } - - // Start building the result - result := &hclConfigurable{ - File: root, - Root: hclRoot, - } - - // Dive in, find the imports. This is disabled for now since - // imports were removed prior to Terraform 0.1. The code is - // remaining here commented for historical purposes. - /* - imports := obj.Get("import") - if imports == nil { - result.Object.Ref() - return result, nil, nil - } - - if imports.Type() != libucl.ObjectTypeString { - imports.Close() - - return nil, nil, fmt.Errorf( - "Error in %s: all 'import' declarations should be in the format\n"+ - "`import \"foo\"` (Got type %s)", - root, - imports.Type()) - } - - // Gather all the import paths - importPaths := make([]string, 0, imports.Len()) - iter := imports.Iterate(false) - for imp := iter.Next(); imp != nil; imp = iter.Next() { - path := imp.ToString() - if !filepath.IsAbs(path) { - // Relative paths are relative to the Terraform file itself - dir := filepath.Dir(root) - path = filepath.Join(dir, path) - } - - importPaths = append(importPaths, path) - imp.Close() - } - iter.Close() - imports.Close() - - result.Object.Ref() - */ - - return result, nil, nil -} - -// Given a handle to a HCL object, this transforms it into the Terraform config -func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'terraform' block allowed per module") - } - - // Get our one item - item := list.Items[0] - - // This block should have an empty top level ObjectItem. If there are keys - // here, it's likely because we have a flattened JSON object, and we can - // lift this into a nested ObjectList to decode properly. - if len(item.Keys) > 0 { - item = &ast.ObjectItem{ - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - } - - // We need the item value as an ObjectList - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("terraform block: should be an object") - } - - // NOTE: We purposely don't validate unknown HCL keys here so that - // we can potentially read _future_ Terraform version config (to - // still be able to validate the required version). - // - // We should still keep track of unknown keys to validate later, but - // HCL doesn't currently support that. - - var config Terraform - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading terraform config: %s", - err) - } - - // If we have provisioners, then parse those out - if os := listVal.Filter("backend"); len(os.Items) > 0 { - var err error - config.Backend, err = loadTerraformBackendHcl(os) - if err != nil { - return nil, fmt.Errorf( - "Error reading backend config for terraform block: %s", - err) - } - } - - return &config, nil -} - -// Loads the Backend configuration from an object list. 
-func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'backend' block allowed") - } - - // Get our one item - item := list.Items[0] - - // Verify the keys - if len(item.Keys) != 1 { - return nil, fmt.Errorf( - "position %s: 'backend' must be followed by exactly one string: a type", - item.Pos()) - } - - typ := item.Keys[0].Token.Value().(string) - - // Decode the raw config - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading backend config: %s", - err) - } - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading backend config: %s", - err) - } - - b := &Backend{ - Type: typ, - RawConfig: rawConfig, - } - b.Hash = b.Rehash() - - return b, nil -} - -// Given a handle to a HCL object, this transforms it into the Atlas -// configuration. -func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) { - if len(list.Items) > 1 { - return nil, fmt.Errorf("only one 'atlas' block allowed") - } - - // Get our one item - item := list.Items[0] - - var config AtlasConfig - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading atlas config: %s", - err) - } - - return &config, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of modules. -// -// The resulting modules may not be unique, but each module -// represents exactly one module definition in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { - if err := assertAllBlocksHaveNames("module", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Module - - // Now go over all the types and their children in order to get - // all of the actual resources. 
- for _, item := range list.Items { - k := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("module '%s': should be an object", k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) - } - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s: %s", - k, - err) - } - - // Remove the fields we handle specially - delete(config, "source") - delete(config, "version") - delete(config, "providers") - - var source string - if o := listVal.Filter("source"); len(o.Items) > 0 { - err = hcl.DecodeObject(&source, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing source for %s: %s", - k, - err) - } - } - - var version string - if o := listVal.Filter("version"); len(o.Items) > 0 { - err = hcl.DecodeObject(&version, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version for %s: %s", - k, - err) - } - } - - var providers map[string]string - if o := listVal.Filter("providers"); len(o.Items) > 0 { - err = hcl.DecodeObject(&providers, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing providers for %s: %s", - k, - err) - } - } - - result = append(result, &Module{ - Name: k, - Source: source, - Version: version, - Providers: providers, - RawConfig: rawConfig, - }) - } - - return result, nil -} - -// loadLocalsHcl recurses into the given HCL object turns it into -// a list of locals. -func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) { - - result := make([]*Local, 0, len(list.Items)) - - for _, block := range list.Items { - if len(block.Keys) > 0 { - return nil, fmt.Errorf( - "locals block at %s should not have label %q", - block.Pos(), block.Keys[0].Token.Value(), - ) - } - - blockObj, ok := block.Val.(*ast.ObjectType) - if !ok { - return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos()) - } - - // blockObj now contains directly our local decls - for _, item := range blockObj.List.Items { - if len(item.Keys) != 1 { - return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos()) - } - - // By the time we get here there can only be one item left, but - // we'll decode into a map anyway because it's a convenient way - // to extract both the key and the value robustly. - kv := map[string]interface{}{} - hcl.DecodeObject(&kv, item) - for k, v := range kv { - rawConfig, err := NewRawConfig(map[string]interface{}{ - "value": v, - }) - - if err != nil { - return nil, fmt.Errorf( - "error parsing local value %q at %s: %s", - k, item.Val.Pos(), err, - ) - } - - result = append(result, &Local{ - Name: k, - RawConfig: rawConfig, - }) - } - } - } - - return result, nil -} - -// LoadOutputsHcl recurses into the given HCL object and turns -// it into a mapping of outputs. -func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { - if err := assertAllBlocksHaveNames("output", list); err != nil { - return nil, err - } - - list = list.Children() - - // Go through each object and turn it into an actual result. 
- result := make([]*Output, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("output '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - // Delete special keys - delete(config, "depends_on") - delete(config, "description") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for output %s: %s", - n, - err) - } - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for output %q: %s", - n, - err) - } - } - - // If we have a description field, then filter that - var description string - if o := listVal.Filter("description"); len(o.Items) > 0 { - err := hcl.DecodeObject(&description, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading description for output %q: %s", - n, - err) - } - } - - result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, - DependsOn: dependsOn, - Description: description, - }) - } - - return result, nil -} - -// LoadVariablesHcl recurses into the given HCL object and turns -// it into a list of variables. -func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) { - if err := assertAllBlocksHaveNames("variable", list); err != nil { - return nil, err - } - - list = list.Children() - - // hclVariable is the structure each variable is decoded into - type hclVariable struct { - DeclaredType string `hcl:"type"` - Default interface{} - Description string - Fields []string `hcl:",decodedFields"` - } - - // Go through each object and turn it into an actual result. - result := make([]*Variable, 0, len(list.Items)) - for _, item := range list.Items { - // Clean up items from JSON - unwrapHCLObjectKeysFromJSON(item, 1) - - // Verify the keys - if len(item.Keys) != 1 { - return nil, fmt.Errorf( - "position %s: 'variable' must be followed by exactly one strings: a name", - item.Pos()) - } - - n := item.Keys[0].Token.Value().(string) - if !NameRegexp.MatchString(n) { - return nil, fmt.Errorf( - "position %s: 'variable' name must match regular expression: %s", - item.Pos(), NameRegexp) - } - - // Check for invalid keys - valid := []string{"type", "default", "description"} - if err := checkHCLKeys(item.Val, valid); err != nil { - return nil, multierror.Prefix(err, fmt.Sprintf( - "variable[%s]:", n)) - } - - // Decode into hclVariable to get typed values - var hclVar hclVariable - if err := hcl.DecodeObject(&hclVar, item.Val); err != nil { - return nil, err - } - - // Defaults turn into a slice of map[string]interface{} and - // we need to make sure to convert that down into the - // proper type for Config. 
- if ms, ok := hclVar.Default.([]map[string]interface{}); ok { - def := make(map[string]interface{}) - for _, m := range ms { - for k, v := range m { - def[k] = v - } - } - - hclVar.Default = def - } - - // Build the new variable and do some basic validation - newVar := &Variable{ - Name: n, - DeclaredType: hclVar.DeclaredType, - Default: hclVar.Default, - Description: hclVar.Description, - } - if err := newVar.ValidateTypeAndDefault(); err != nil { - return nil, err - } - - result = append(result, newVar) - } - - return result, nil -} - -// LoadProvidersHcl recurses into the given HCL object and turns -// it into a mapping of provider configs. -func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { - if err := assertAllBlocksHaveNames("provider", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Go through each object and turn it into an actual result. - result := make([]*ProviderConfig, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("module '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - delete(config, "alias") - delete(config, "version") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for provider config %s: %s", - n, - err) - } - - // If we have an alias field, then add those in - var alias string - if a := listVal.Filter("alias"); len(a.Items) > 0 { - err := hcl.DecodeObject(&alias, a.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading alias for provider[%s]: %s", - n, - err) - } - } - - // If we have a version field then extract it - var version string - if a := listVal.Filter("version"); len(a.Items) > 0 { - err := hcl.DecodeObject(&version, a.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading version for provider[%s]: %s", - n, - err) - } - } - - result = append(result, &ProviderConfig{ - Name: n, - Alias: alias, - Version: version, - RawConfig: rawConfig, - }) - } - - return result, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of data sources. -// -// The resulting data sources may not be unique, but each one -// represents exactly one data definition in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { - if err := assertAllBlocksHaveNames("data", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Resource - - // Now go over all the types and their children in order to get - // all of the actual resources. 
- for _, item := range list.Items { - if len(item.Keys) != 2 { - return nil, fmt.Errorf( - "position %s: 'data' must be followed by exactly two strings: a type and a name", - item.Pos()) - } - - t := item.Keys[0].Token.Value().(string) - k := item.Keys[1].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // Remove the fields we handle specially - delete(config, "depends_on") - delete(config, "provider") - delete(config, "count") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // If we have a count, then figure it out - var count string = "1" - if o := listVal.Filter("count"); len(o.Items) > 0 { - err = hcl.DecodeObject(&count, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) - if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have a provider, then parse it out - var provider string - if o := listVal.Filter("provider"); len(o.Items) > 0 { - err := hcl.DecodeObject(&provider, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t, - k, - err) - } - } - - result = append(result, &Resource{ - Mode: DataResourceMode, - Name: k, - Type: t, - RawCount: countConfig, - RawConfig: rawConfig, - Provider: provider, - Provisioners: []*Provisioner{}, - DependsOn: dependsOn, - Lifecycle: ResourceLifecycle{}, - }) - } - - return result, nil -} - -// Given a handle to a HCL object, this recurses into the structure -// and pulls out a list of managed resources. -// -// The resulting resources may not be unique, but each resource -// represents exactly one "resource" block in the HCL configuration. -// We leave it up to another pass to merge them together. -func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Where all the results will go - var result []*Resource - - // Now go over all the types and their children in order to get - // all of the actual resources. - for _, item := range list.Items { - // GH-4385: We detect a pure provisioner resource and give the user - // an error about how to do it cleanly. - if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" { - return nil, fmt.Errorf( - "position %s: provisioners in a resource should be wrapped in a list\n\n"+ - "Example: \"provisioner\": [ { \"local-exec\": ... 
} ]", - item.Pos()) - } - - // Fix up JSON input - unwrapHCLObjectKeysFromJSON(item, 2) - - if len(item.Keys) != 2 { - return nil, fmt.Errorf( - "position %s: resource must be followed by exactly two strings, a type and a name", - item.Pos()) - } - - t := item.Keys[0].Token.Value().(string) - k := item.Keys[1].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // Remove the fields we handle specially - delete(config, "connection") - delete(config, "count") - delete(config, "depends_on") - delete(config, "provisioner") - delete(config, "provider") - delete(config, "lifecycle") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t, - k, - err) - } - - // If we have a count, then figure it out - var count string = "1" - if o := listVal.Filter("count"); len(o.Items) > 0 { - err = hcl.DecodeObject(&count, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) - if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := listVal.Filter("depends_on"); len(o.Items) > 0 { - err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have connection info, then parse those out - var connInfo map[string]interface{} - if o := listVal.Filter("connection"); len(o.Items) > 0 { - err := hcl.DecodeObject(&connInfo, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading connection info for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have provisioners, then parse those out - var provisioners []*Provisioner - if os := listVal.Filter("provisioner"); len(os.Items) > 0 { - var err error - provisioners, err = loadProvisionersHcl(os, connInfo) - if err != nil { - return nil, fmt.Errorf( - "Error reading provisioners for %s[%s]: %s", - t, - k, - err) - } - } - - // If we have a provider, then parse it out - var provider string - if o := listVal.Filter("provider"); len(o.Items) > 0 { - err := hcl.DecodeObject(&provider, o.Items[0].Val) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t, - k, - err) - } - } - - // Check if the resource should be re-created before - // destroying the existing instance - var lifecycle ResourceLifecycle - if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { - if len(o.Items) > 1 { - return nil, fmt.Errorf( - "%s[%s]: Multiple lifecycle blocks found, expected one", - t, k) - } - - // Check for invalid keys - valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"} - if err := checkHCLKeys(o.Items[0].Val, valid); err != nil { - return nil, multierror.Prefix(err, fmt.Sprintf( - "%s[%s]:", t, k)) - } - - var raw map[string]interface{} - if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t, - k, - err) - } - - if err := 
mapstructure.WeakDecode(raw, &lifecycle); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t, - k, - err) - } - } - - result = append(result, &Resource{ - Mode: ManagedResourceMode, - Name: k, - Type: t, - RawCount: countConfig, - RawConfig: rawConfig, - Provisioners: provisioners, - Provider: provider, - DependsOn: dependsOn, - Lifecycle: lifecycle, - }) - } - - return result, nil -} - -func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { - if err := assertAllBlocksHaveNames("provisioner", list); err != nil { - return nil, err - } - - list = list.Children() - if len(list.Items) == 0 { - return nil, nil - } - - // Go through each object and turn it into an actual result. - result := make([]*Provisioner, 0, len(list.Items)) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - - var listVal *ast.ObjectList - if ot, ok := item.Val.(*ast.ObjectType); ok { - listVal = ot.List - } else { - return nil, fmt.Errorf("provisioner '%s': should be an object", n) - } - - var config map[string]interface{} - if err := hcl.DecodeObject(&config, item.Val); err != nil { - return nil, err - } - - // Parse the "when" value - when := ProvisionerWhenCreate - if v, ok := config["when"]; ok { - switch v { - case "create": - when = ProvisionerWhenCreate - case "destroy": - when = ProvisionerWhenDestroy - default: - return nil, fmt.Errorf( - "position %s: 'provisioner' when must be 'create' or 'destroy'", - item.Pos()) - } - } - - // Parse the "on_failure" value - onFailure := ProvisionerOnFailureFail - if v, ok := config["on_failure"]; ok { - switch v { - case "continue": - onFailure = ProvisionerOnFailureContinue - case "fail": - onFailure = ProvisionerOnFailureFail - default: - return nil, fmt.Errorf( - "position %s: 'provisioner' on_failure must be 'continue' or 'fail'", - item.Pos()) - } - } - - // Delete fields we special case - delete(config, "connection") - delete(config, "when") - delete(config, "on_failure") - - rawConfig, err := NewRawConfig(config) - if err != nil { - return nil, err - } - - // Check if we have a provisioner-level connection - // block that overrides the resource-level - var subConnInfo map[string]interface{} - if o := listVal.Filter("connection"); len(o.Items) > 0 { - err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val) - if err != nil { - return nil, err - } - } - - // Inherit from the resource connInfo any keys - // that are not explicitly overriden. - if connInfo != nil && subConnInfo != nil { - for k, v := range connInfo { - if _, ok := subConnInfo[k]; !ok { - subConnInfo[k] = v - } - } - } else if subConnInfo == nil { - subConnInfo = connInfo - } - - // Parse the connInfo - connRaw, err := NewRawConfig(subConnInfo) - if err != nil { - return nil, err - } - - result = append(result, &Provisioner{ - Type: n, - RawConfig: rawConfig, - ConnInfo: connRaw, - When: when, - OnFailure: onFailure, - }) - } - - return result, nil -} - -/* -func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { - objects := make(map[string][]*hclobj.Object) - - for _, o := range os.Elem(false) { - for _, elem := range o.Elem(true) { - val, ok := objects[elem.Key] - if !ok { - val = make([]*hclobj.Object, 0, 1) - } - - val = append(val, elem) - objects[elem.Key] = val - } - } - - return objects -} -*/ - -// assertAllBlocksHaveNames returns an error if any of the items in -// the given object list are blocks without keys (like "module {}") -// or simple assignments (like "module = 1"). 
It returns nil if -// neither of these things are true. -// -// The given name is used in any generated error messages, and should -// be the name of the block we're dealing with. The given list should -// be the result of calling .Filter on an object list with that same -// name. -func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error { - if elem := list.Elem(); len(elem.Items) != 0 { - switch et := elem.Items[0].Val.(type) { - case *ast.ObjectType: - pos := et.Lbrace - return fmt.Errorf("%s: %q must be followed by a name", pos, name) - default: - pos := elem.Items[0].Val.Pos() - return fmt.Errorf("%s: %q must be a configuration block", pos, name) - } - } - return nil -} - -func checkHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf( - "invalid key: %s", key)) - } - } - - return result -} - -// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when -// parsing JSON as input: if we're parsing JSON then directly nested -// items will show up as additional "keys". -// -// For objects that expect a fixed number of keys, this breaks the -// decoding process. This function unwraps the object into what it would've -// looked like if it came directly from HCL by specifying the number of keys -// you expect. -// -// Example: -// -// { "foo": { "baz": {} } } -// -// Will show up with Keys being: []string{"foo", "baz"} -// when we really just want the first two. This function will fix this. -func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) { - if len(item.Keys) > depth && item.Keys[0].Token.JSON { - for len(item.Keys) > depth { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{ - &ast.ObjectItem{ - Keys: []*ast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go deleted file mode 100644 index da7559a9..00000000 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go +++ /dev/null @@ -1,473 +0,0 @@ -package config - -import ( - "fmt" - "sort" - "strings" - - hcl2 "github.com/hashicorp/hcl/v2" - gohcl2 "github.com/hashicorp/hcl/v2/gohcl" - hcl2parse "github.com/hashicorp/hcl/v2/hclparse" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" -) - -// hcl2Configurable is an implementation of configurable that knows -// how to turn a HCL Body into a *Config object. -type hcl2Configurable struct { - SourceFilename string - Body hcl2.Body -} - -// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc. -type hcl2Loader struct { - Parser *hcl2parse.Parser -} - -// For the moment we'll just have a global loader since we don't have anywhere -// better to stash this. -// TODO: refactor the loader API so that it uses some sort of object we can -// stash the parser inside. 
-var globalHCL2Loader = newHCL2Loader() - -// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser. -// -// HCL parsers retain information about files that are loaded to aid in -// producing diagnostic messages, so all files within a single configuration -// should be loaded with the same parser to ensure the availability of -// full diagnostic information. -func newHCL2Loader() hcl2Loader { - return hcl2Loader{ - Parser: hcl2parse.NewParser(), - } -} - -// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it -// into a hcl2Configurable. -func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) { - var f *hcl2.File - var diags hcl2.Diagnostics - if strings.HasSuffix(filename, ".json") { - f, diags = l.Parser.ParseJSONFile(filename) - } else { - f, diags = l.Parser.ParseHCLFile(filename) - } - if diags.HasErrors() { - // Return diagnostics as an error; callers may type-assert this to - // recover the original diagnostics, if it doesn't end up wrapped - // in another error. - return nil, nil, diags - } - - return &hcl2Configurable{ - SourceFilename: filename, - Body: f.Body, - }, nil, nil -} - -func (t *hcl2Configurable) Config() (*Config, error) { - config := &Config{} - - // these structs are used only for the initial shallow decoding; we'll - // expand this into the main, public-facing config structs afterwards. - type atlas struct { - Name string `hcl:"name"` - Include *[]string `hcl:"include"` - Exclude *[]string `hcl:"exclude"` - } - type provider struct { - Name string `hcl:"name,label"` - Alias *string `hcl:"alias,attr"` - Version *string `hcl:"version,attr"` - Config hcl2.Body `hcl:",remain"` - } - type module struct { - Name string `hcl:"name,label"` - Source string `hcl:"source,attr"` - Version *string `hcl:"version,attr"` - Providers *map[string]string `hcl:"providers,attr"` - Config hcl2.Body `hcl:",remain"` - } - type resourceLifecycle struct { - CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"` - PreventDestroy *bool `hcl:"prevent_destroy,attr"` - IgnoreChanges *[]string `hcl:"ignore_changes,attr"` - } - type connection struct { - Config hcl2.Body `hcl:",remain"` - } - type provisioner struct { - Type string `hcl:"type,label"` - - When *string `hcl:"when,attr"` - OnFailure *string `hcl:"on_failure,attr"` - - Connection *connection `hcl:"connection,block"` - Config hcl2.Body `hcl:",remain"` - } - type managedResource struct { - Type string `hcl:"type,label"` - Name string `hcl:"name,label"` - - CountExpr hcl2.Expression `hcl:"count,attr"` - Provider *string `hcl:"provider,attr"` - DependsOn *[]string `hcl:"depends_on,attr"` - - Lifecycle *resourceLifecycle `hcl:"lifecycle,block"` - Provisioners []provisioner `hcl:"provisioner,block"` - Connection *connection `hcl:"connection,block"` - - Config hcl2.Body `hcl:",remain"` - } - type dataResource struct { - Type string `hcl:"type,label"` - Name string `hcl:"name,label"` - - CountExpr hcl2.Expression `hcl:"count,attr"` - Provider *string `hcl:"provider,attr"` - DependsOn *[]string `hcl:"depends_on,attr"` - - Config hcl2.Body `hcl:",remain"` - } - type variable struct { - Name string `hcl:"name,label"` - - DeclaredType *string `hcl:"type,attr"` - Default *cty.Value `hcl:"default,attr"` - Description *string `hcl:"description,attr"` - Sensitive *bool `hcl:"sensitive,attr"` - } - type output struct { - Name string `hcl:"name,label"` - - ValueExpr hcl2.Expression `hcl:"value,attr"` - DependsOn *[]string `hcl:"depends_on,attr"` - Description *string 
`hcl:"description,attr"` - Sensitive *bool `hcl:"sensitive,attr"` - } - type locals struct { - Definitions hcl2.Attributes `hcl:",remain"` - } - type backend struct { - Type string `hcl:"type,label"` - Config hcl2.Body `hcl:",remain"` - } - type terraform struct { - RequiredVersion *string `hcl:"required_version,attr"` - Backend *backend `hcl:"backend,block"` - } - type topLevel struct { - Atlas *atlas `hcl:"atlas,block"` - Datas []dataResource `hcl:"data,block"` - Modules []module `hcl:"module,block"` - Outputs []output `hcl:"output,block"` - Providers []provider `hcl:"provider,block"` - Resources []managedResource `hcl:"resource,block"` - Terraform *terraform `hcl:"terraform,block"` - Variables []variable `hcl:"variable,block"` - Locals []*locals `hcl:"locals,block"` - } - - var raw topLevel - diags := gohcl2.DecodeBody(t.Body, nil, &raw) - if diags.HasErrors() { - // Do some minimal decoding to see if we can at least get the - // required Terraform version, which might help explain why we - // couldn't parse the rest. - if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil { - config.Terraform = &Terraform{ - RequiredVersion: *raw.Terraform.RequiredVersion, - } - } - - // We return the diags as an implementation of error, which the - // caller than then type-assert if desired to recover the individual - // diagnostics. - // FIXME: The current API gives us no way to return warnings in the - // absence of any errors. - return config, diags - } - - if raw.Terraform != nil { - var reqdVersion string - var backend *Backend - - if raw.Terraform.RequiredVersion != nil { - reqdVersion = *raw.Terraform.RequiredVersion - } - if raw.Terraform.Backend != nil { - backend = new(Backend) - backend.Type = raw.Terraform.Backend.Type - - // We don't permit interpolations or nested blocks inside the - // backend config, so we can decode the config early here and - // get direct access to the values, which is important for the - // config hashing to work as expected. - var config map[string]string - configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config) - diags = append(diags, configDiags...) 
- - raw := make(map[string]interface{}, len(config)) - for k, v := range config { - raw[k] = v - } - - var err error - backend.RawConfig, err = NewRawConfig(raw) - if err != nil { - diags = append(diags, &hcl2.Diagnostic{ - Severity: hcl2.DiagError, - Summary: "Invalid backend configuration", - Detail: fmt.Sprintf("Error in backend configuration: %s", err), - }) - } - } - - config.Terraform = &Terraform{ - RequiredVersion: reqdVersion, - Backend: backend, - } - } - - if raw.Atlas != nil { - var include, exclude []string - if raw.Atlas.Include != nil { - include = *raw.Atlas.Include - } - if raw.Atlas.Exclude != nil { - exclude = *raw.Atlas.Exclude - } - config.Atlas = &AtlasConfig{ - Name: raw.Atlas.Name, - Include: include, - Exclude: exclude, - } - } - - for _, rawM := range raw.Modules { - m := &Module{ - Name: rawM.Name, - Source: rawM.Source, - RawConfig: NewRawConfigHCL2(rawM.Config), - } - - if rawM.Version != nil { - m.Version = *rawM.Version - } - - if rawM.Providers != nil { - m.Providers = *rawM.Providers - } - - config.Modules = append(config.Modules, m) - } - - for _, rawV := range raw.Variables { - v := &Variable{ - Name: rawV.Name, - } - if rawV.DeclaredType != nil { - v.DeclaredType = *rawV.DeclaredType - } - if rawV.Default != nil { - v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default) - } - if rawV.Description != nil { - v.Description = *rawV.Description - } - - config.Variables = append(config.Variables, v) - } - - for _, rawO := range raw.Outputs { - o := &Output{ - Name: rawO.Name, - } - - if rawO.Description != nil { - o.Description = *rawO.Description - } - if rawO.DependsOn != nil { - o.DependsOn = *rawO.DependsOn - } - if rawO.Sensitive != nil { - o.Sensitive = *rawO.Sensitive - } - - // The result is expected to be a map like map[string]interface{}{"value": something}, - // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
- o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{ - Name: "value", - Expr: rawO.ValueExpr, - }) - - config.Outputs = append(config.Outputs, o) - } - - for _, rawR := range raw.Resources { - r := &Resource{ - Mode: ManagedResourceMode, - Type: rawR.Type, - Name: rawR.Name, - } - if rawR.Lifecycle != nil { - var l ResourceLifecycle - if rawR.Lifecycle.CreateBeforeDestroy != nil { - l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy - } - if rawR.Lifecycle.PreventDestroy != nil { - l.PreventDestroy = *rawR.Lifecycle.PreventDestroy - } - if rawR.Lifecycle.IgnoreChanges != nil { - l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges - } - r.Lifecycle = l - } - if rawR.Provider != nil { - r.Provider = *rawR.Provider - } - if rawR.DependsOn != nil { - r.DependsOn = *rawR.DependsOn - } - - var defaultConnInfo *RawConfig - if rawR.Connection != nil { - defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config) - } - - for _, rawP := range rawR.Provisioners { - p := &Provisioner{ - Type: rawP.Type, - } - - switch { - case rawP.When == nil: - p.When = ProvisionerWhenCreate - case *rawP.When == "create": - p.When = ProvisionerWhenCreate - case *rawP.When == "destroy": - p.When = ProvisionerWhenDestroy - default: - p.When = ProvisionerWhenInvalid - } - - switch { - case rawP.OnFailure == nil: - p.OnFailure = ProvisionerOnFailureFail - case *rawP.When == "fail": - p.OnFailure = ProvisionerOnFailureFail - case *rawP.When == "continue": - p.OnFailure = ProvisionerOnFailureContinue - default: - p.OnFailure = ProvisionerOnFailureInvalid - } - - if rawP.Connection != nil { - p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config) - } else { - p.ConnInfo = defaultConnInfo - } - - p.RawConfig = NewRawConfigHCL2(rawP.Config) - - r.Provisioners = append(r.Provisioners, p) - } - - // The old loader records the count expression as a weird RawConfig with - // a single-element map inside. Since the rest of the world is assuming - // that, we'll mimic it here. - { - countBody := hcl2shim.SingleAttrBody{ - Name: "count", - Expr: rawR.CountExpr, - } - - r.RawCount = NewRawConfigHCL2(countBody) - r.RawCount.Key = "count" - } - - r.RawConfig = NewRawConfigHCL2(rawR.Config) - - config.Resources = append(config.Resources, r) - - } - - for _, rawR := range raw.Datas { - r := &Resource{ - Mode: DataResourceMode, - Type: rawR.Type, - Name: rawR.Name, - } - - if rawR.Provider != nil { - r.Provider = *rawR.Provider - } - if rawR.DependsOn != nil { - r.DependsOn = *rawR.DependsOn - } - - // The old loader records the count expression as a weird RawConfig with - // a single-element map inside. Since the rest of the world is assuming - // that, we'll mimic it here. - { - countBody := hcl2shim.SingleAttrBody{ - Name: "count", - Expr: rawR.CountExpr, - } - - r.RawCount = NewRawConfigHCL2(countBody) - r.RawCount.Key = "count" - } - - r.RawConfig = NewRawConfigHCL2(rawR.Config) - - config.Resources = append(config.Resources, r) - } - - for _, rawP := range raw.Providers { - p := &ProviderConfig{ - Name: rawP.Name, - } - - if rawP.Alias != nil { - p.Alias = *rawP.Alias - } - if rawP.Version != nil { - p.Version = *rawP.Version - } - - // The result is expected to be a map like map[string]interface{}{"value": something}, - // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
- p.RawConfig = NewRawConfigHCL2(rawP.Config) - - config.ProviderConfigs = append(config.ProviderConfigs, p) - } - - for _, rawL := range raw.Locals { - names := make([]string, 0, len(rawL.Definitions)) - for n := range rawL.Definitions { - names = append(names, n) - } - sort.Strings(names) - for _, n := range names { - attr := rawL.Definitions[n] - l := &Local{ - Name: n, - RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{ - Name: "value", - Expr: attr.Expr, - }), - } - config.Locals = append(config.Locals, l) - } - } - - // FIXME: The current API gives us no way to return warnings in the - // absence of any errors. - var err error - if diags.HasErrors() { - err = diags - } - - return config, err -} diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go deleted file mode 100644 index 55fc864f..00000000 --- a/vendor/github.com/hashicorp/terraform/config/merge.go +++ /dev/null @@ -1,204 +0,0 @@ -package config - -// Merge merges two configurations into a single configuration. -// -// Merge allows for the two configurations to have duplicate resources, -// because the resources will be merged. This differs from a single -// Config which must only have unique resources. -func Merge(c1, c2 *Config) (*Config, error) { - c := new(Config) - - // Merge unknown keys - unknowns := make(map[string]struct{}) - for _, k := range c1.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - for _, k := range c2.unknownKeys { - _, present := unknowns[k] - if !present { - unknowns[k] = struct{}{} - c.unknownKeys = append(c.unknownKeys, k) - } - } - - // Merge Atlas configuration. This is a dumb one overrides the other - // sort of merge. - c.Atlas = c1.Atlas - if c2.Atlas != nil { - c.Atlas = c2.Atlas - } - - // Merge the Terraform configuration - if c1.Terraform != nil { - c.Terraform = c1.Terraform - if c2.Terraform != nil { - c.Terraform.Merge(c2.Terraform) - } - } else { - c.Terraform = c2.Terraform - } - - // NOTE: Everything below is pretty gross. Due to the lack of generics - // in Go, there is some hoop-jumping involved to make this merging a - // little more test-friendly and less repetitive. Ironically, making it - // less repetitive involves being a little repetitive, but I prefer to - // be repetitive with things that are less error prone than things that - // are more error prone (more logic). Type conversions to an interface - // are pretty low-error. 
- - var m1, m2, mresult []merger - - // Modules - m1 = make([]merger, 0, len(c1.Modules)) - m2 = make([]merger, 0, len(c2.Modules)) - for _, v := range c1.Modules { - m1 = append(m1, v) - } - for _, v := range c2.Modules { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Modules = make([]*Module, len(mresult)) - for i, v := range mresult { - c.Modules[i] = v.(*Module) - } - } - - // Outputs - m1 = make([]merger, 0, len(c1.Outputs)) - m2 = make([]merger, 0, len(c2.Outputs)) - for _, v := range c1.Outputs { - m1 = append(m1, v) - } - for _, v := range c2.Outputs { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Outputs = make([]*Output, len(mresult)) - for i, v := range mresult { - c.Outputs[i] = v.(*Output) - } - } - - // Provider Configs - m1 = make([]merger, 0, len(c1.ProviderConfigs)) - m2 = make([]merger, 0, len(c2.ProviderConfigs)) - for _, v := range c1.ProviderConfigs { - m1 = append(m1, v) - } - for _, v := range c2.ProviderConfigs { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.ProviderConfigs = make([]*ProviderConfig, len(mresult)) - for i, v := range mresult { - c.ProviderConfigs[i] = v.(*ProviderConfig) - } - } - - // Resources - m1 = make([]merger, 0, len(c1.Resources)) - m2 = make([]merger, 0, len(c2.Resources)) - for _, v := range c1.Resources { - m1 = append(m1, v) - } - for _, v := range c2.Resources { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Resources = make([]*Resource, len(mresult)) - for i, v := range mresult { - c.Resources[i] = v.(*Resource) - } - } - - // Variables - m1 = make([]merger, 0, len(c1.Variables)) - m2 = make([]merger, 0, len(c2.Variables)) - for _, v := range c1.Variables { - m1 = append(m1, v) - } - for _, v := range c2.Variables { - m2 = append(m2, v) - } - mresult = mergeSlice(m1, m2) - if len(mresult) > 0 { - c.Variables = make([]*Variable, len(mresult)) - for i, v := range mresult { - c.Variables[i] = v.(*Variable) - } - } - - // Local Values - // These are simpler than the other config elements because they are just - // flat values and so no deep merging is required. - if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 { - // Explicit length check above because we want c.Locals to remain - // nil if the result would be empty. - c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) - c.Locals = append(c.Locals, c1.Locals...) - c.Locals = append(c.Locals, c2.Locals...) - } - - return c, nil -} - -// merger is an interface that must be implemented by types that are -// merge-able. This simplifies the implementation of Merge for the various -// components of a Config. -type merger interface { - mergerName() string - mergerMerge(merger) merger -} - -// mergeSlice merges a slice of mergers. -func mergeSlice(m1, m2 []merger) []merger { - r := make([]merger, len(m1), len(m1)+len(m2)) - copy(r, m1) - - m := map[string]struct{}{} - for _, v2 := range m2 { - // If we already saw it, just append it because its a - // duplicate and invalid... 
- name := v2.mergerName() - if _, ok := m[name]; ok { - r = append(r, v2) - continue - } - m[name] = struct{}{} - - // Find an original to override - var original merger - originalIndex := -1 - for i, v := range m1 { - if v.mergerName() == name { - originalIndex = i - original = v - break - } - } - - var v merger - if original == nil { - v = v2 - } else { - v = original.mergerMerge(v2) - } - - if originalIndex == -1 { - r = append(r, v) - } else { - r[originalIndex] = v - } - } - - return r -} diff --git a/vendor/github.com/hashicorp/terraform/config/providers.go b/vendor/github.com/hashicorp/terraform/config/providers.go deleted file mode 100644 index eeddabc3..00000000 --- a/vendor/github.com/hashicorp/terraform/config/providers.go +++ /dev/null @@ -1,61 +0,0 @@ -package config - -import "github.com/blang/semver" - -// ProviderVersionConstraint presents a constraint for a particular -// provider, identified by its full name. -type ProviderVersionConstraint struct { - Constraint string - ProviderType string -} - -// ProviderVersionConstraints is a map from provider full name to its associated -// ProviderVersionConstraint, as produced by Config.RequiredProviders. -type ProviderVersionConstraints map[string]ProviderVersionConstraint - -// RequiredRanges returns a semver.Range for each distinct provider type in -// the constraint map. If the same provider type appears more than once -// (e.g. because aliases are in use) then their respective constraints are -// combined such that they must *all* apply. -// -// The result of this method can be passed to the -// PluginMetaSet.ConstrainVersions method within the plugin/discovery -// package in order to filter down the available plugins to those which -// satisfy the given constraints. -// -// This function will panic if any of the constraints within cannot be -// parsed as semver ranges. This is guaranteed to never happen for a -// constraint set that was built from a configuration that passed validation. -func (cons ProviderVersionConstraints) RequiredRanges() map[string]semver.Range { - ret := make(map[string]semver.Range, len(cons)) - - for _, con := range cons { - spec := semver.MustParseRange(con.Constraint) - if existing, exists := ret[con.ProviderType]; exists { - ret[con.ProviderType] = existing.AND(spec) - } else { - ret[con.ProviderType] = spec - } - } - - return ret -} - -// ProviderConfigsByFullName returns a map from provider full names (as -// returned by ProviderConfig.FullName()) to the corresponding provider -// configs. -// -// This function returns no new information than what's already in -// c.ProviderConfigs, but returns it in a more convenient shape. If there -// is more than one provider config with the same full name then the result -// is undefined, but that is guaranteed not to happen for any config that -// has passed validation. -func (c *Config) ProviderConfigsByFullName() map[string]*ProviderConfig { - ret := make(map[string]*ProviderConfig, len(c.ProviderConfigs)) - - for _, pc := range c.ProviderConfigs { - ret[pc.FullName()] = pc - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go deleted file mode 100644 index 00fd43fc..00000000 --- a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go +++ /dev/null @@ -1,40 +0,0 @@ -package config - -// ProvisionerWhen is an enum for valid values for when to run provisioners. 
-type ProvisionerWhen int - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -var provisionerWhenStrs = map[ProvisionerWhen]string{ - ProvisionerWhenInvalid: "invalid", - ProvisionerWhenCreate: "create", - ProvisionerWhenDestroy: "destroy", -} - -func (v ProvisionerWhen) String() string { - return provisionerWhenStrs[v] -} - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. -type ProvisionerOnFailure int - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{ - ProvisionerOnFailureInvalid: "invalid", - ProvisionerOnFailureContinue: "continue", - ProvisionerOnFailureFail: "fail", -} - -func (v ProvisionerOnFailure) String() string { - return provisionerOnFailureStrs[v] -} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go deleted file mode 100644 index 27bcd1da..00000000 --- a/vendor/github.com/hashicorp/terraform/config/raw_config.go +++ /dev/null @@ -1,419 +0,0 @@ -package config - -import ( - "bytes" - "encoding/gob" - "errors" - "strconv" - "sync" - - hcl2 "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hil" - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" -) - -// RawConfig is a structure that holds a piece of configuration -// where the overall structure is unknown since it will be used -// to configure a plugin or some other similar external component. -// -// RawConfigs can be interpolated with variables that come from -// other resources, user variables, etc. -// -// RawConfig supports a query-like interface to request -// information from deep within the structure. -type RawConfig struct { - Key string - - // Only _one_ of Raw and Body may be populated at a time. - // - // In the normal case, Raw is populated and Body is nil. - // - // When the experimental HCL2 parsing mode is enabled, "Body" - // is populated and RawConfig serves only to transport the hcl2.Body - // through the rest of Terraform core so we can ultimately decode it - // once its schema is known. - // - // Once we transition to HCL2 as the primary representation, RawConfig - // should be removed altogether and the hcl2.Body should be passed - // around directly. - - Raw map[string]interface{} - Body hcl2.Body - - Interpolations []ast.Node - Variables map[string]InterpolatedVariable - - lock sync.Mutex - config map[string]interface{} - unknownKeys []string -} - -// NewRawConfig creates a new RawConfig structure and populates the -// publicly readable struct fields. -func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { - result := &RawConfig{Raw: raw} - if err := result.init(); err != nil { - return nil, err - } - - return result, nil -} - -// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule -// to transport a hcl2.Body. In this mode, the publicly-readable struct -// fields are not populated since all operations should instead be diverted -// to the HCL2 body. -// -// For a RawConfig object constructed with this function, the only valid use -// is to later retrieve the Body value and call its own methods. Callers -// may choose to set and then later handle the Key field, in a manner -// consistent with how it is handled by the Value method, but the Value -// method itself must not be used. 
-// -// This is an experimental codepath to be used only by the HCL2 config loader. -// Non-experimental parsing should _always_ use NewRawConfig to produce a -// fully-functional RawConfig object. -func NewRawConfigHCL2(body hcl2.Body) *RawConfig { - return &RawConfig{ - Body: body, - } -} - -// RawMap returns a copy of the RawConfig.Raw map. -func (r *RawConfig) RawMap() map[string]interface{} { - r.lock.Lock() - defer r.lock.Unlock() - - m := make(map[string]interface{}) - for k, v := range r.Raw { - m[k] = v - } - return m -} - -// Copy returns a copy of this RawConfig, uninterpolated. -func (r *RawConfig) Copy() *RawConfig { - if r == nil { - return nil - } - - r.lock.Lock() - defer r.lock.Unlock() - - if r.Body != nil { - return NewRawConfigHCL2(r.Body) - } - - newRaw := make(map[string]interface{}) - for k, v := range r.Raw { - newRaw[k] = v - } - - result, err := NewRawConfig(newRaw) - if err != nil { - panic("copy failed: " + err.Error()) - } - - result.Key = r.Key - return result -} - -// Value returns the value of the configuration if this configuration -// has a Key set. If this does not have a Key set, nil will be returned. -func (r *RawConfig) Value() interface{} { - if c := r.Config(); c != nil { - if v, ok := c[r.Key]; ok { - return v - } - } - - r.lock.Lock() - defer r.lock.Unlock() - return r.Raw[r.Key] -} - -// Config returns the entire configuration with the variables -// interpolated from any call to Interpolate. -// -// If any interpolated variables are unknown (value set to -// UnknownVariableValue), the first non-container (map, slice, etc.) element -// will be removed from the config. The keys of unknown variables -// can be found using the UnknownKeys function. -// -// By pruning out unknown keys from the configuration, the raw -// structure will always successfully decode into its ultimate -// structure using something like mapstructure. -func (r *RawConfig) Config() map[string]interface{} { - r.lock.Lock() - defer r.lock.Unlock() - return r.config -} - -// Interpolate uses the given mapping of variable values and uses -// those as the values to replace any variables in this raw -// configuration. -// -// Any prior calls to Interpolate are replaced with this one. -// -// If a variable key is missing, this will panic. -func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { - r.lock.Lock() - defer r.lock.Unlock() - - config := langEvalConfig(vs) - return r.interpolate(func(root ast.Node) (interface{}, error) { - // None of the variables we need are computed, meaning we should - // be able to properly evaluate. - result, err := hil.Eval(root, config) - if err != nil { - return "", err - } - - return result.Value, nil - }) -} - -// Merge merges another RawConfig into this one (overriding any conflicting -// values in this config) and returns a new config. The original config -// is not modified. 
-func (r *RawConfig) Merge(other *RawConfig) *RawConfig { - r.lock.Lock() - defer r.lock.Unlock() - - // Merge the raw configurations - raw := make(map[string]interface{}) - for k, v := range r.Raw { - raw[k] = v - } - for k, v := range other.Raw { - raw[k] = v - } - - // Create the result - result, err := NewRawConfig(raw) - if err != nil { - panic(err) - } - - // Merge the interpolated results - result.config = make(map[string]interface{}) - for k, v := range r.config { - result.config[k] = v - } - for k, v := range other.config { - result.config[k] = v - } - - // Build the unknown keys - if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 { - unknownKeys := make(map[string]struct{}) - for _, k := range r.unknownKeys { - unknownKeys[k] = struct{}{} - } - for _, k := range other.unknownKeys { - unknownKeys[k] = struct{}{} - } - - result.unknownKeys = make([]string, 0, len(unknownKeys)) - for k, _ := range unknownKeys { - result.unknownKeys = append(result.unknownKeys, k) - } - } - - return result -} - -func (r *RawConfig) init() error { - r.lock.Lock() - defer r.lock.Unlock() - - r.config = r.Raw - r.Interpolations = nil - r.Variables = nil - - fn := func(node ast.Node) (interface{}, error) { - r.Interpolations = append(r.Interpolations, node) - vars, err := DetectVariables(node) - if err != nil { - return "", err - } - - for _, v := range vars { - if r.Variables == nil { - r.Variables = make(map[string]InterpolatedVariable) - } - - r.Variables[v.FullKey()] = v - } - - return "", nil - } - - walker := &interpolationWalker{F: fn} - if err := reflectwalk.Walk(r.Raw, walker); err != nil { - return err - } - - return nil -} - -func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error { - if r.Body != nil { - // For RawConfigs created for the HCL2 experiement, callers must - // use the HCL2 Body API directly rather than interpolating via - // the RawConfig. - return errors.New("this feature is not yet supported under the HCL2 experiment") - } - - config, err := copystructure.Copy(r.Raw) - if err != nil { - return err - } - r.config = config.(map[string]interface{}) - - w := &interpolationWalker{F: fn, Replace: true} - err = reflectwalk.Walk(r.config, w) - if err != nil { - return err - } - - r.unknownKeys = w.unknownKeys - return nil -} - -func (r *RawConfig) merge(r2 *RawConfig) *RawConfig { - if r == nil && r2 == nil { - return nil - } - - if r == nil { - r = &RawConfig{} - } - - rawRaw, err := copystructure.Copy(r.Raw) - if err != nil { - panic(err) - } - - raw := rawRaw.(map[string]interface{}) - if r2 != nil { - for k, v := range r2.Raw { - raw[k] = v - } - } - - result, err := NewRawConfig(raw) - if err != nil { - panic(err) - } - - return result -} - -// couldBeInteger is a helper that determines if the represented value could -// result in an integer. -// -// This function only works for RawConfigs that have "Key" set, meaning that -// a single result can be produced. Calling this function will overwrite -// the Config and Value results to be a test value. -// -// This function is conservative. If there is some doubt about whether the -// result could be an integer -- for example, if it depends on a variable -// whose type we don't know yet -- it will still return true. -func (r *RawConfig) couldBeInteger() bool { - if r.Key == "" { - // un-keyed RawConfigs can never produce numbers - return false - } - if r.Body == nil { - // Normal path: using the interpolator in this package - // Interpolate with a fixed number to verify that its a number. 
- r.interpolate(func(root ast.Node) (interface{}, error) { - // Execute the node but transform the AST so that it returns - // a fixed value of "5" for all interpolations. - result, err := hil.Eval( - hil.FixedValueTransform( - root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), - nil) - if err != nil { - return "", err - } - - return result.Value, nil - }) - _, err := strconv.ParseInt(r.Value().(string), 0, 0) - return err == nil - } else { - // We briefly tried to gradually implement HCL2 support by adding a - // branch here, but that experiment was not successful. - panic("HCL2 experimental path no longer supported") - } -} - -// UnknownKeys returns the keys of the configuration that are unknown -// because they had interpolated variables that must be computed. -func (r *RawConfig) UnknownKeys() []string { - r.lock.Lock() - defer r.lock.Unlock() - return r.unknownKeys -} - -// See GobEncode -func (r *RawConfig) GobDecode(b []byte) error { - var data gobRawConfig - err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data) - if err != nil { - return err - } - - r.Key = data.Key - r.Raw = data.Raw - - return r.init() -} - -// GobEncode is a custom Gob encoder to use so that we only include the -// raw configuration. Interpolated variables and such are lost and the -// tree of interpolated variables is recomputed on decode, since it is -// referentially transparent. -func (r *RawConfig) GobEncode() ([]byte, error) { - r.lock.Lock() - defer r.lock.Unlock() - - data := gobRawConfig{ - Key: r.Key, - Raw: r.Raw, - } - - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(data); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type gobRawConfig struct { - Key string - Raw map[string]interface{} -} - -// langEvalConfig returns the evaluation configuration we use to execute. -// -// The interpolation functions are no longer available here, because this -// codepath is no longer used. Instead, see ../lang/functions.go . -func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig { - funcMap := make(map[string]ast.Function) - for k, v := range Funcs() { - funcMap[k] = v - } - - return &hil.EvalConfig{ - GlobalScope: &ast.BasicScope{ - VarMap: vs, - FuncMap: funcMap, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go deleted file mode 100644 index dd915217..00000000 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go -type ResourceMode int - -const ( - ManagedResourceMode ResourceMode = iota - DataResourceMode -) diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go deleted file mode 100644 index 01052782..00000000 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. - -package config - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ManagedResourceMode-0] - _ = x[DataResourceMode-1] -} - -const _ResourceMode_name = "ManagedResourceModeDataResourceMode" - -var _ResourceMode_index = [...]uint8{0, 19, 35} - -func (i ResourceMode) String() string { - if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { - return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go deleted file mode 100644 index 831fc778..00000000 --- a/vendor/github.com/hashicorp/terraform/config/testing.go +++ /dev/null @@ -1,17 +0,0 @@ -package config - -import ( - "testing" -) - -// TestRawConfig is used to create a RawConfig for testing. -func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig { - t.Helper() - - cfg, err := NewRawConfig(c) - if err != nil { - t.Fatalf("err: %s", err) - } - - return cfg -} diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go deleted file mode 100644 index 5d8b9732..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/backend.go +++ /dev/null @@ -1,55 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// Backend represents a "backend" block inside a "terraform" block in a module -// or file. -type Backend struct { - Type string - Config hcl.Body - - TypeRange hcl.Range - DeclRange hcl.Range -} - -func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { - return &Backend{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - Config: block.Body, - DeclRange: block.DefRange, - }, nil -} - -// Hash produces a hash value for the reciever that covers the type and the -// portions of the config that conform to the given schema. -// -// If the config does not conform to the schema then the result is not -// meaningful for comparison since it will be based on an incomplete result. -// -// As an exception, required attributes in the schema are treated as optional -// for the purpose of hashing, so that an incomplete configuration can still -// be hashed. Other errors, such as extraneous attributes, have no such special -// case. -func (b *Backend) Hash(schema *configschema.Block) int { - // Don't fail if required attributes are not set. Instead, we'll just - // hash them as nulls. - schema = schema.NoneRequired() - spec := schema.DecoderSpec() - val, _ := hcldec.Decode(b.Config, spec, nil) - if val == cty.NilVal { - val = cty.UnknownVal(schema.ImpliedType()) - } - - toHash := cty.TupleVal([]cty.Value{ - cty.StringVal(b.Type), - val, - }) - - return toHash.Hash() -} diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go deleted file mode 100644 index b645ac89..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go +++ /dev/null @@ -1,164 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// ------------------------------------------------------------------------- -// Functions in this file are compatibility shims intended to ease conversion -// from the old configuration loader. 
Any use of these functions that makes -// a change should generate a deprecation warning explaining to the user how -// to update their code for new patterns. -// -// Shims are particularly important for any patterns that have been widely -// documented in books, tutorials, etc. Users will still be starting from -// these examples and we want to help them adopt the latest patterns rather -// than leave them stranded. -// ------------------------------------------------------------------------- - -// shimTraversalInString takes any arbitrary expression and checks if it is -// a quoted string in the native syntax. If it _is_, then it is parsed as a -// traversal and re-wrapped into a synthetic traversal expression and a -// warning is generated. Otherwise, the given expression is just returned -// verbatim. -// -// This function has no effect on expressions from the JSON syntax, since -// traversals in strings are the required pattern in that syntax. -// -// If wantKeyword is set, the generated warning diagnostic will talk about -// keywords rather than references. The behavior is otherwise unchanged, and -// the caller remains responsible for checking that the result is indeed -// a keyword, e.g. using hcl.ExprAsKeyword. -func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { - // ObjectConsKeyExpr is a special wrapper type used for keys on object - // constructors to deal with the fact that naked identifiers are normally - // handled as "bareword" strings rather than as variable references. Since - // we know we're interpreting as a traversal anyway (and thus it won't - // matter whether it's a string or an identifier) we can safely just unwrap - // here and then process whatever we find inside as normal. - if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { - expr = ocke.Wrapped - } - - if !exprIsNativeQuotedString(expr) { - return expr, nil - } - - strVal, diags := expr.Value(nil) - if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { - // Since we're not even able to attempt a shim here, we'll discard - // the diagnostics we saw so far and let the caller's own error - // handling take care of reporting the invalid expression. - return expr, nil - } - - // The position handling here isn't _quite_ right because it won't - // take into account any escape sequences in the literal string, but - // it should be close enough for any error reporting to make sense. - srcRange := expr.Range() - startPos := srcRange.Start // copy - startPos.Column++ // skip initial quote - startPos.Byte++ // skip initial quote - - traversal, tDiags := hclsyntax.ParseTraversalAbs( - []byte(strVal.AsString()), - srcRange.Filename, - startPos, - ) - diags = append(diags, tDiags...) - - if wantKeyword { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted keywords are deprecated", - Detail: "In this context, keywords are expected literally rather than in quotes. Terraform 0.11 and earlier required quotes, but quoted keywords are now deprecated and will be removed in a future version of Terraform. Remove the quotes surrounding this keyword to silence this warning.", - Subject: &srcRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted references are deprecated", - Detail: "In this context, references are expected literally rather than in quotes. 
Terraform 0.11 and earlier required quotes, but quoted references are now deprecated and will be removed in a future version of Terraform. Remove the quotes surrounding this reference to silence this warning.", - Subject: &srcRange, - }) - } - - return &hclsyntax.ScopeTraversalExpr{ - Traversal: traversal, - SrcRange: srcRange, - }, diags -} - -// shimIsIgnoreChangesStar returns true if the given expression seems to be -// a string literal whose value is "*". This is used to support a legacy -// form of ignore_changes = all . -// -// This function does not itself emit any diagnostics, so it's the caller's -// responsibility to emit a warning diagnostic when this function returns true. -func shimIsIgnoreChangesStar(expr hcl.Expression) bool { - val, valDiags := expr.Value(nil) - if valDiags.HasErrors() { - return false - } - if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { - return false - } - return val.AsString() == "*" -} - -// warnForDeprecatedInterpolations returns warning diagnostics if the given -// body can be proven to contain attributes whose expressions are native -// syntax expressions consisting entirely of a single template interpolation, -// which is a deprecated way to include a non-literal value in configuration. -// -// This is a best-effort sort of thing which relies on the physical HCL native -// syntax AST, so it might not catch everything. The main goal is to catch the -// "obvious" cases in order to help spread awareness that this old form is -// deprecated, when folks copy it from older examples they've found on the -// internet that were written for Terraform 0.11 or earlier. -func warnForDeprecatedInterpolationsInBody(body hcl.Body) hcl.Diagnostics { - var diags hcl.Diagnostics - - nativeBody, ok := body.(*hclsyntax.Body) - if !ok { - // If it's not native syntax then we've nothing to do here. - return diags - } - - for _, attr := range nativeBody.Attributes { - moreDiags := warnForDeprecatedInterpolationsInExpr(attr.Expr) - diags = append(diags, moreDiags...) - } - - for _, block := range nativeBody.Blocks { - // We'll also go hunting in nested blocks - moreDiags := warnForDeprecatedInterpolationsInBody(block.Body) - diags = append(diags, moreDiags...) - } - - return diags -} - -func warnForDeprecatedInterpolationsInExpr(expr hcl.Expression) hcl.Diagnostics { - var diags hcl.Diagnostics - - if _, ok := expr.(*hclsyntax.TemplateWrapExpr); !ok { - // We're only interested in TemplateWrapExpr, because that's how - // the HCL native syntax parser represents the case of a template - // that consists entirely of a single interpolation expression, which - // is therefore subject to the special case of passing through the - // inner value without conversion to string. - return diags - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Interpolation-only expressions are deprecated", - Detail: "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation syntax, but this pattern is now deprecated. To silence this warning, remove the \"${ sequence from the start and the }\" sequence from the end of this expression, leaving just the inner expression.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. 
This deprecation applies only to templates that consist entirely of a single interpolation sequence.", - Subject: expr.Range().Ptr(), - }) - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go deleted file mode 100644 index cde0014c..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/config.go +++ /dev/null @@ -1,205 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -// -// A module tree described in *this* package represents the static tree -// represented by configuration. During evaluation a static ModuleNode may -// expand into zero or more module instances depending on the use of count and -// for_each configuration attributes within each call. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. - Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *Module - - // CallRange is the source range for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallRange hcl.Range - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string - - // SourceAddrRange is the location in the configuration source where the - // SourceAddr value was set, for use in diagnostic messages. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddrRange hcl.Range - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. 
- Version *version.Version -} - -// NewEmptyConfig constructs a single-node configuration tree with an empty -// root module. This is generally a pretty useless thing to do, so most callers -// should instead use BuildConfig. -func NewEmptyConfig() *Config { - ret := &Config{} - ret.Root = ret - ret.Children = make(map[string]*Config) - ret.Module = &Module{} - return ret -} - -// Depth returns the number of "hops" the receiver is from the root of its -// module tree, with the root module having a depth of zero. -func (c *Config) Depth() int { - ret := 0 - this := c - for this.Parent != nil { - ret++ - this = this.Parent - } - return ret -} - -// DeepEach calls the given function once for each module in the tree, starting -// with the receiver. -// -// A parent is always called before its children and children of a particular -// node are visited in lexicographic order by their names. -func (c *Config) DeepEach(cb func(c *Config)) { - cb(c) - - names := make([]string, 0, len(c.Children)) - for name := range c.Children { - names = append(names, name) - } - - for _, name := range names { - c.Children[name].DeepEach(cb) - } -} - -// AllModules returns a slice of all the receiver and all of its descendent -// nodes in the module tree, in the same order they would be visited by -// DeepEach. -func (c *Config) AllModules() []*Config { - var ret []*Config - c.DeepEach(func(c *Config) { - ret = append(ret, c) - }) - return ret -} - -// Descendent returns the descendent config that has the given path beneath -// the receiver, or nil if there is no such module. -// -// The path traverses the static module tree, prior to any expansion to handle -// count and for_each arguments. -// -// An empty path will just return the receiver, and is therefore pointless. -func (c *Config) Descendent(path addrs.Module) *Config { - current := c - for _, name := range path { - current = current.Children[name] - if current == nil { - return nil - } - } - return current -} - -// DescendentForInstance is like Descendent except that it accepts a path -// to a particular module instance in the dynamic module graph, returning -// the node from the static module graph that corresponds to it. -// -// All instances created by a particular module call share the same -// configuration, so the keys within the given path are disregarded. -func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { - current := c - for _, step := range path { - current = current.Children[step.Name] - if current == nil { - return nil - } - } - return current -} - -// ProviderTypes returns the names of each distinct provider type referenced -// in the receiving configuration. -// -// This is a helper for easily determining which provider types are required -// to fully interpret the configuration, though it does not include version -// information and so callers are expected to have already dealt with -// provider version selection in an earlier step and have identified suitable -// versions for each provider. 
-func (c *Config) ProviderTypes() []string { - m := make(map[string]struct{}) - c.gatherProviderTypes(m) - - ret := make([]string, 0, len(m)) - for k := range m { - ret = append(ret, k) - } - sort.Strings(ret) - return ret -} -func (c *Config) gatherProviderTypes(m map[string]struct{}) { - if c == nil { - return - } - - for _, pc := range c.Module.ProviderConfigs { - m[pc.Name] = struct{}{} - } - for _, rc := range c.Module.ManagedResources { - providerAddr := rc.ProviderConfigAddr() - m[providerAddr.Type.LegacyString()] = struct{}{} - } - for _, rc := range c.Module.DataResources { - providerAddr := rc.ProviderConfigAddr() - m[providerAddr.Type.LegacyString()] = struct{}{} - } - - // Must also visit our child modules, recursively. - for _, cc := range c.Children { - cc.gatherProviderTypes(m) - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go deleted file mode 100644 index c38a6792..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/config_build.go +++ /dev/null @@ -1,180 +0,0 @@ -package configs - -import ( - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -// -// The result is a module tree that has so far only had basic module- and -// file-level invariants validated. If the returned diagnostics contains errors, -// the returned module tree may be incomplete but can still be used carefully -// for static analysis. -func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := map[string]*Config{} - - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. - callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - VersionConstraint: call.Version, - Parent: parent, - CallRange: call.DeclRange, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallRange: call.DeclRange, - SourceAddr: call.SourceAddr, - SourceAddrRange: call.SourceAddrRange, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = append(diags, modDiags...) 
- - ret[call.Name] = child - } - - return ret, diags -} - -// A ModuleWalker knows how to find and load a child module given details about -// the module to be loaded and a reference to its partially-loaded parent -// Config. -type ModuleWalker interface { - // LoadModule finds and loads a requested child module. - // - // If errors are detected during loading, implementations should return them - // in the diagnostics object. If the diagnostics object contains any errors - // then the caller will tolerate the returned module being nil or incomplete. - // If no errors are returned, it should be non-nil and complete. - // - // Full validation need not have been performed but an implementation should - // ensure that the basic file- and module-validations performed by the - // LoadConfigDir function (valid syntax, no namespace collisions, etc) have - // been performed before returning a module. - LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. -type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) - -// LoadModule implements ModuleWalker. -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return f(req) -} - -// ModuleRequest is used with the ModuleWalker interface to describe a child -// module that must be loaded. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // SourceAddrRange is the source range for the SourceAddr value as it - // was provided in configuration. This can and should be used to generate - // diagnostics about the source address having invalid syntax, referring - // to a non-existent object, etc. - SourceAddrRange hcl.Range - - // VersionConstraint is the version constraint applied to the module in - // configuration. This data structure includes the source range for - // the constraint, which can and should be used to generate diagnostics - // about constraint-related issues, such as constraints that eliminate all - // available versions of a module whose source is otherwise valid. - VersionConstraint VersionConstraint - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source range for the header of the "module" block - // in configuration that prompted this request. 
This can be used as the - // subject of an error diagnostic that relates to the module call itself, - // rather than to either its source address or its version number. - CallRange hcl.Range -} - -// DisabledModuleWalker is a ModuleWalker that doesn't support -// child modules at all, and so will return an error if asked to load one. -// -// This is provided primarily for testing. There is no good reason to use this -// in the main application. -var DisabledModuleWalker ModuleWalker - -func init() { - DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Child modules are not supported", - Detail: "Child module calls are not allowed in this context.", - Subject: &req.CallRange, - }, - } - }) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go deleted file mode 100644 index 840a7aa9..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go +++ /dev/null @@ -1,115 +0,0 @@ -package configload - -import ( - "io" - "os" - "path/filepath" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If the current path is a symlink, recreate the symlink relative to - // the dst directory - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - target, err := os.Readlink(path) - if err != nil { - return err - } - - return os.Symlink(target, dstPath) - } - - // If we have a file, copy the contents. - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} - -// sameFile tried to determine if to paths are the same file. -// If the paths don't match, we lookup the inode on supported systems. 
-func sameFile(a, b string) (bool, error) { - if a == b { - return true, nil - } - - aIno, err := inode(a) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - bIno, err := inode(b) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - if aIno > 0 && aIno == bIno { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go deleted file mode 100644 index 8b615f90..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package configload knows how to install modules into the .terraform/modules -// directory and to load modules from those installed locations. It is used -// in conjunction with the LoadConfig function in the parent package. -package configload diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go deleted file mode 100644 index 146f04b3..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go +++ /dev/null @@ -1,153 +0,0 @@ -package configload - -import ( - "fmt" - "log" - "os" - "path/filepath" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - getter "github.com/hashicorp/go-getter" -) - -// We configure our own go-getter detector and getter sets here, because -// the set of sources we support is part of Terraform's documentation and -// so we don't want any new sources introduced in go-getter to sneak in here -// and work even though they aren't documented. This also insulates us from -// any meddling that might be done by other go-getter callers linked into our -// executable. - -var goGetterDetectors = []getter.Detector{ - new(getter.GitHubDetector), - new(getter.GitDetector), - new(getter.BitBucketDetector), - new(getter.GCSDetector), - new(getter.S3Detector), - new(getter.FileDetector), -} - -var goGetterNoDetectors = []getter.Detector{} - -var goGetterDecompressors = map[string]getter.Decompressor{ - "bz2": new(getter.Bzip2Decompressor), - "gz": new(getter.GzipDecompressor), - "xz": new(getter.XzDecompressor), - "zip": new(getter.ZipDecompressor), - - "tar.bz2": new(getter.TarBzip2Decompressor), - "tar.tbz2": new(getter.TarBzip2Decompressor), - - "tar.gz": new(getter.TarGzipDecompressor), - "tgz": new(getter.TarGzipDecompressor), - - "tar.xz": new(getter.TarXzDecompressor), - "txz": new(getter.TarXzDecompressor), -} - -var goGetterGetters = map[string]getter.Getter{ - "file": new(getter.FileGetter), - "gcs": new(getter.GCSGetter), - "git": new(getter.GitGetter), - "hg": new(getter.HgGetter), - "s3": new(getter.S3Getter), - "http": getterHTTPGetter, - "https": getterHTTPGetter, -} - -var getterHTTPClient = cleanhttp.DefaultClient() - -var getterHTTPGetter = &getter.HttpGetter{ - Client: getterHTTPClient, - Netrc: true, -} - -// A reusingGetter is a helper for the module installer that remembers -// the final resolved addresses of all of the sources it has already been -// asked to install, and will copy from a prior installation directory if -// it has the same resolved source address. -// -// The keys in a reusingGetter are resolved and trimmed source addresses -// (with a scheme always present, and without any "subdir" component), -// and the values are the paths where each source was previously installed. 
-type reusingGetter map[string]string - -// getWithGoGetter retrieves the package referenced in the given address -// into the installation path and then returns the full path to any subdir -// indicated in the address. -// -// The errors returned by this function are those surfaced by the underlying -// go-getter library, which have very inconsistent quality as -// end-user-actionable error messages. At this time we do not have any -// reasonable way to improve these error messages at this layer because -// the underlying errors are not separatelyr recognizable. -func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { - packageAddr, subDir := splitAddrSubdir(addr) - - log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) - - realAddr, err := getter.Detect(packageAddr, instPath, goGetterDetectors) - if err != nil { - return "", err - } - - var realSubDir string - realAddr, realSubDir = splitAddrSubdir(realAddr) - if realSubDir != "" { - subDir = filepath.Join(realSubDir, subDir) - } - - if realAddr != packageAddr { - log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) - } - - if prevDir, exists := g[realAddr]; exists { - log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) - err := os.Mkdir(instPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) - } - err = copyDir(instPath, prevDir) - if err != nil { - return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) - } - } else { - log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) - client := getter.Client{ - Src: realAddr, - Dst: instPath, - Pwd: instPath, - - Mode: getter.ClientModeDir, - - Detectors: goGetterNoDetectors, // we already did detection above - Decompressors: goGetterDecompressors, - Getters: goGetterGetters, - } - err = client.Get() - if err != nil { - return "", err - } - // Remember where we installed this so we might reuse this directory - // on subsequent calls to avoid re-downloading. - g[realAddr] = instPath - } - - // Our subDir string can contain wildcards until this point, so that - // e.g. a subDir of * can expand to one top-level directory in a .tar.gz - // archive. Now that we've expanded the archive successfully we must - // resolve that into a concrete path. - var finalDir string - if subDir != "" { - finalDir, err = getter.SubdirGlob(instPath, subDir) - log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) - if err != nil { - return "", err - } - } else { - finalDir = instPath - } - - // If we got this far then we have apparently succeeded in downloading - // the requested object! 
- return filepath.Clean(finalDir), nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go deleted file mode 100644 index 57df0414..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux darwin openbsd netbsd solaris dragonfly - -package configload - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return st.Ino, nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go deleted file mode 100644 index 4dc28eaa..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build freebsd - -package configload - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return uint64(st.Ino), nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go deleted file mode 100644 index 0d22e672..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package configload - -// no syscall.Stat_t on windows, return 0 for inodes -func inode(path string) (uint64, error) { - return 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go deleted file mode 100644 index a09b80c8..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go +++ /dev/null @@ -1,150 +0,0 @@ -package configload - -import ( - "fmt" - "path/filepath" - - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/registry" - "github.com/spf13/afero" -) - -// A Loader instance is the main entry-point for loading configurations via -// this package. -// -// It extends the general config-loading functionality in the parent package -// "configs" to support installation of modules from remote sources and -// loading full configurations using modules that were previously installed. -type Loader struct { - // parser is used to read configuration - parser *configs.Parser - - // modules is used to install and locate descendent modules that are - // referenced (directly or indirectly) from the root module. - modules moduleMgr -} - -// Config is used with NewLoader to specify configuration arguments for the -// loader. -type Config struct { - // ModulesDir is a path to a directory where descendent modules are - // (or should be) installed. (This is usually the - // .terraform/modules directory, in the common case where this package - // is being loaded from the main Terraform CLI package.) - ModulesDir string - - // Services is the service discovery client to use when locating remote - // module registry endpoints. 
If this is nil then registry sources are - // not supported, which should be true only in specialized circumstances - // such as in tests. - Services *disco.Disco -} - -// NewLoader creates and returns a loader that reads configuration from the -// real OS filesystem. -// -// The loader has some internal state about the modules that are currently -// installed, which is read from disk as part of this function. If that -// manifest cannot be read then an error will be returned. -func NewLoader(config *Config) (*Loader, error) { - fs := afero.NewOsFs() - parser := configs.NewParser(fs) - reg := registry.NewClient(config.Services, nil) - - ret := &Loader{ - parser: parser, - modules: moduleMgr{ - FS: afero.Afero{Fs: fs}, - CanInstall: true, - Dir: config.ModulesDir, - Services: config.Services, - Registry: reg, - }, - } - - err := ret.modules.readModuleManifestSnapshot() - if err != nil { - return nil, fmt.Errorf("failed to read module manifest: %s", err) - } - - return ret, nil -} - -// ModulesDir returns the path to the directory where the loader will look for -// the local cache of remote module packages. -func (l *Loader) ModulesDir() string { - return l.modules.Dir -} - -// RefreshModules updates the in-memory cache of the module manifest from the -// module manifest file on disk. This is not necessary in normal use because -// module installation and configuration loading are separate steps, but it -// can be useful in tests where module installation is done as a part of -// configuration loading by a helper function. -// -// Call this function after any module installation where an existing loader -// is already alive and may be used again later. -// -// An error is returned if the manifest file cannot be read. -func (l *Loader) RefreshModules() error { - if l == nil { - // Nothing to do, then. - return nil - } - return l.modules.readModuleManifestSnapshot() -} - -// Parser returns the underlying parser for this loader. -// -// This is useful for loading other sorts of files than the module directories -// that a loader deals with, since then they will share the source code cache -// for this loader and can thus be shown as snippets in diagnostic messages. -func (l *Loader) Parser() *configs.Parser { - return l.parser -} - -// Sources returns the source code cache for the underlying parser of this -// loader. This is a shorthand for l.Parser().Sources(). -func (l *Loader) Sources() map[string][]byte { - return l.parser.Sources() -} - -// IsConfigDir returns true if and only if the given directory contains at -// least one Terraform configuration file. This is a wrapper around calling -// the same method name on the loader's parser. -func (l *Loader) IsConfigDir(path string) bool { - return l.parser.IsConfigDir(path) -} - -// ImportSources writes into the receiver's source code the given source -// code buffers. -// -// This is useful in the situation where an ancillary loader is created for -// some reason (e.g. loading config from a plan file) but the cached source -// code from that loader must be imported into the "main" loader in order -// to return source code snapshots in diagnostic messages. -// -// loader.ImportSources(otherLoader.Sources()) -func (l *Loader) ImportSources(sources map[string][]byte) { - p := l.Parser() - for name, src := range sources { - p.ForceFileSource(name, src) - } -} - -// ImportSourcesFromSnapshot writes into the receiver's source code the -// source files from the given snapshot. 
-// -// This is similar to ImportSources but knows how to unpack and flatten a -// snapshot data structure to get the corresponding flat source file map. -func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) { - p := l.Parser() - for _, m := range snap.Modules { - baseDir := m.Dir - for fn, src := range m.Files { - fullPath := filepath.Join(baseDir, fn) - p.ForceFileSource(fullPath, src) - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go deleted file mode 100644 index 80b2de1b..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go +++ /dev/null @@ -1,105 +0,0 @@ -package configload - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs" -) - -// LoadConfig reads the Terraform module in the given directory and uses it as the -// root module to build the static module tree that represents a configuration, -// assuming that all required descendent modules have already been installed. -// -// If error diagnostics are returned, the returned configuration may be either -// nil or incomplete. In the latter case, cautious static analysis is possible -// in spite of the errors. -// -// LoadConfig performs the basic syntax and uniqueness validations that are -// required to process the individual modules, and also detects -func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) { - rootMod, diags := l.parser.LoadConfigDir(rootDir) - if rootMod == nil { - return nil, diags - } - - cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) - diags = append(diags, cDiags...) - - return cfg, diags -} - -// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that -// are presumed to have already been installed. A different function -// (moduleWalkerInstall) is used for installation. -func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { - // Since we're just loading here, we expect that all referenced modules - // will be already installed and described in our manifest. However, we - // do verify that the manifest and the configuration are in agreement - // so that we can prompt the user to run "terraform init" if not. - - key := l.modules.manifest.ModuleKey(req.Path) - record, exists := l.modules.manifest[key] - - if !exists { - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Module not installed", - Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.CallRange, - }, - } - } - - var diags hcl.Diagnostics - - // Check for inconsistencies between manifest and config - if req.SourceAddr != record.SourceAddr { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module source has changed", - Detail: "The source address was changed since this module was installed. 
Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.SourceAddrRange, - }) - } - if len(req.VersionConstraint.Required) > 0 && record.Version == nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module version requirements have changed", - Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", - Subject: &req.SourceAddrRange, - }) - } - if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module version requirements have changed", - Detail: fmt.Sprintf( - "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", - record.Version, - ), - Subject: &req.SourceAddrRange, - }) - } - - mod, mDiags := l.parser.LoadConfigDir(record.Dir) - diags = append(diags, mDiags...) - if mod == nil { - // nil specifically indicates that the directory does not exist or - // cannot be read, so in this case we'll discard any generic diagnostics - // returned from LoadConfigDir and produce our own context-sensitive - // error message. - return nil, nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Module not installed", - Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), - Subject: &req.CallRange, - }, - } - } - - return mod, record.Version, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go deleted file mode 100644 index 9e83895c..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go +++ /dev/null @@ -1,504 +0,0 @@ -package configload - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "time" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/spf13/afero" -) - -// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously -// creates an in-memory snapshot of the configuration files used, which can -// be later used to create a loader that may read only from this snapshot. -func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { - rootMod, diags := l.parser.LoadConfigDir(rootDir) - if rootMod == nil { - return nil, nil, diags - } - - snap := &Snapshot{ - Modules: map[string]*SnapshotModule{}, - } - walker := l.makeModuleWalkerSnapshot(snap) - cfg, cDiags := configs.BuildConfig(rootMod, walker) - diags = append(diags, cDiags...) - - addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) - diags = append(diags, addDiags...) - - return cfg, snap, diags -} - -// NewLoaderFromSnapshot creates a Loader that reads files only from the -// given snapshot. -// -// A snapshot-based loader cannot install modules, so calling InstallModules -// on the return value will cause a panic. -// -// A snapshot-based loader also has access only to configuration files. 
Its -// underlying parser does not have access to other files in the native -// filesystem, such as values files. For those, either use a normal loader -// (created by NewLoader) or use the configs.Parser API directly. -func NewLoaderFromSnapshot(snap *Snapshot) *Loader { - fs := snapshotFS{snap} - parser := configs.NewParser(fs) - - ret := &Loader{ - parser: parser, - modules: moduleMgr{ - FS: afero.Afero{Fs: fs}, - CanInstall: false, - manifest: snap.moduleManifest(), - }, - } - - return ret -} - -// Snapshot is an in-memory representation of the source files from a -// configuration, which can be used as an alternative configurations source -// for a loader with NewLoaderFromSnapshot. -// -// The primary purpose of a Snapshot is to build the configuration portion -// of a plan file (see ../../plans/planfile) so that it can later be reloaded -// and used to recover the exact configuration that the plan was built from. -type Snapshot struct { - // Modules is a map from opaque module keys (suitable for use as directory - // names on all supported operating systems) to the snapshot information - // about each module. - Modules map[string]*SnapshotModule -} - -// NewEmptySnapshot constructs and returns a snapshot containing only an empty -// root module. This is not useful for anything except placeholders in tests. -func NewEmptySnapshot() *Snapshot { - return &Snapshot{ - Modules: map[string]*SnapshotModule{ - "": &SnapshotModule{ - Files: map[string][]byte{}, - }, - }, - } -} - -// SnapshotModule represents a single module within a Snapshot. -type SnapshotModule struct { - // Dir is the path, relative to the root directory given when the - // snapshot was created, where the module appears in the snapshot's - // virtual filesystem. - Dir string - - // Files is a map from each configuration file filename for the - // module to a raw byte representation of the source file contents. - Files map[string][]byte - - // SourceAddr is the source address given for this module in configuration. - SourceAddr string `json:"Source"` - - // Version is the version of the module that is installed, or nil if - // the module is installed from a source that does not support versions. - Version *version.Version `json:"-"` -} - -// moduleManifest constructs a module manifest based on the contents of -// the receiving snapshot. -func (s *Snapshot) moduleManifest() modsdir.Manifest { - ret := make(modsdir.Manifest) - - for k, modSnap := range s.Modules { - ret[k] = modsdir.Record{ - Key: k, - Dir: modSnap.Dir, - SourceAddr: modSnap.SourceAddr, - Version: modSnap.Version, - } - } - - return ret -} - -// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit -// the same lookup behaviors as l.moduleWalkerLoad but will additionally write -// source files from the referenced modules into the given snapshot. -func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker { - return configs.ModuleWalkerFunc( - func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { - mod, v, diags := l.moduleWalkerLoad(req) - if diags.HasErrors() { - return mod, v, diags - } - - key := l.modules.manifest.ModuleKey(req.Path) - record, exists := l.modules.manifest[key] - - if !exists { - // Should never happen, since otherwise moduleWalkerLoader would've - // returned an error and we would've returned already. 
- panic(fmt.Sprintf("module %s is not present in manifest", key)) - } - - addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version) - diags = append(diags, addDiags...) - - return mod, v, diags - }, - ) -} - -func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { - var diags hcl.Diagnostics - - primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) - if moreDiags.HasErrors() { - // Any diagnostics we get here should be already present - // in diags, so it's weird if we get here but we'll allow it - // and return a general error message in that case. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read directory for module", - Detail: fmt.Sprintf("The source directory %s could not be read", dir), - }) - return diags - } - - snapMod := &SnapshotModule{ - Dir: dir, - Files: map[string][]byte{}, - SourceAddr: sourceAddr, - Version: v, - } - - files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) - files = append(files, primaryFiles...) - files = append(files, overrideFiles...) - sources := l.Sources() // should be populated with all the files we need by now - for _, filePath := range files { - filename := filepath.Base(filePath) - src, exists := sources[filePath] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing source file for snapshot", - Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), - }) - continue - } - snapMod.Files[filepath.Clean(filename)] = src - } - - snap.Modules[key] = snapMod - - return diags -} - -// snapshotFS is an implementation of afero.Fs that reads from a snapshot. -// -// This is not intended as a general-purpose filesystem implementation. Instead, -// it just supports the minimal functionality required to support the -// configuration loader and parser as an implementation detail of creating -// a loader from a snapshot. -type snapshotFS struct { - snap *Snapshot -} - -var _ afero.Fs = snapshotFS{} - -func (fs snapshotFS) Create(name string) (afero.File, error) { - return nil, fmt.Errorf("cannot create file inside configuration snapshot") -} - -func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { - return fmt.Errorf("cannot create directory inside configuration snapshot") -} - -func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { - return fmt.Errorf("cannot create directories inside configuration snapshot") -} - -func (fs snapshotFS) Open(name string) (afero.File, error) { - - // Our "filesystem" is sparsely populated only with the directories - // mentioned by modules in our snapshot, so the high-level process - // for opening a file is: - // - Find the module snapshot corresponding to the containing directory - // - Find the file within that snapshot - // - Wrap the resulting byte slice in a snapshotFile to return - // - // The other possibility handled here is if the given name is for the - // module directory itself, in which case we'll return a snapshotDir - // instead. - // - // This function doesn't try to be incredibly robust in supporting - // different permutations of paths, etc because in practice we only - // need to support the path forms that our own loader and parser will - // generate. 
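In other words, the snapshot filesystem only has to answer two kinds of request: a module directory itself, or a file directly inside one. For a feel of the same read-only interaction through the afero.Fs interface, here is a small sketch using afero's stock in-memory filesystem rather than the snapshot-backed one; paths and file contents are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Write a configuration file into an in-memory filesystem and read it
	// back through the same afero.Fs interface that the snapshot filesystem
	// implements for the parser.
	fs := afero.NewMemMapFs()
	if err := afero.WriteFile(fs, "modules/vpc/main.tf", []byte(`variable "name" {}`), 0644); err != nil {
		panic(err)
	}

	src, err := afero.ReadFile(fs, "modules/vpc/main.tf")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", src)
}
```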
- - dir := filepath.Dir(name) - fn := filepath.Base(name) - directDir := filepath.Clean(name) - - // First we'll check to see if this is an exact path for a module directory. - // We need to do this first (rather than as part of the next loop below) - // because a module in a child directory of another module can otherwise - // appear to be a file in that parent directory. - for _, candidate := range fs.snap.Modules { - modDir := filepath.Clean(candidate.Dir) - if modDir == directDir { - // We've matched the module directory itself - filenames := make([]string, 0, len(candidate.Files)) - for n := range candidate.Files { - filenames = append(filenames, n) - } - sort.Strings(filenames) - return snapshotDir{ - filenames: filenames, - }, nil - } - } - - // If we get here then the given path isn't a module directory exactly, so - // we'll treat it as a file path and try to find a module directory it - // could be located in. - var modSnap *SnapshotModule - for _, candidate := range fs.snap.Modules { - modDir := filepath.Clean(candidate.Dir) - if modDir == dir { - modSnap = candidate - break - } - } - if modSnap == nil { - return nil, os.ErrNotExist - } - - src, exists := modSnap.Files[fn] - if !exists { - return nil, os.ErrNotExist - } - - return &snapshotFile{ - src: src, - }, nil -} - -func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { - return fs.Open(name) -} - -func (fs snapshotFS) Remove(name string) error { - return fmt.Errorf("cannot remove file inside configuration snapshot") -} - -func (fs snapshotFS) RemoveAll(path string) error { - return fmt.Errorf("cannot remove files inside configuration snapshot") -} - -func (fs snapshotFS) Rename(old, new string) error { - return fmt.Errorf("cannot rename file inside configuration snapshot") -} - -func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { - f, err := fs.Open(name) - if err != nil { - return nil, err - } - _, isDir := f.(snapshotDir) - return snapshotFileInfo{ - name: filepath.Base(name), - isDir: isDir, - }, nil -} - -func (fs snapshotFS) Name() string { - return "ConfigSnapshotFS" -} - -func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { - return fmt.Errorf("cannot set file mode inside configuration snapshot") -} - -func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { - return fmt.Errorf("cannot set file times inside configuration snapshot") -} - -type snapshotFile struct { - snapshotFileStub - src []byte - at int64 -} - -var _ afero.File = (*snapshotFile)(nil) - -func (f *snapshotFile) Read(p []byte) (n int, err error) { - if len(p) > 0 && f.at == int64(len(f.src)) { - return 0, io.EOF - } - if f.at > int64(len(f.src)) { - return 0, io.ErrUnexpectedEOF - } - if int64(len(f.src))-f.at >= int64(len(p)) { - n = len(p) - } else { - n = int(int64(len(f.src)) - f.at) - } - copy(p, f.src[f.at:f.at+int64(n)]) - f.at += int64(n) - return -} - -func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { - f.at = off - return f.Read(p) -} - -func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { - switch whence { - case 0: - f.at = offset - case 1: - f.at += offset - case 2: - f.at = int64(len(f.src)) + offset - } - return f.at, nil -} - -type snapshotDir struct { - snapshotFileStub - filenames []string - at int -} - -var _ afero.File = snapshotDir{} - -func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { - names, err := f.Readdirnames(count) - if err != nil { - return nil, err - } - ret := make([]os.FileInfo, 
len(names)) - for i, name := range names { - ret[i] = snapshotFileInfo{ - name: name, - isDir: false, - } - } - return ret, nil -} - -func (f snapshotDir) Readdirnames(count int) ([]string, error) { - var outLen int - names := f.filenames[f.at:] - if count > 0 { - if len(names) < count { - outLen = len(names) - } else { - outLen = count - } - if len(names) == 0 { - return nil, io.EOF - } - } else { - outLen = len(names) - } - f.at += outLen - - return names[:outLen], nil -} - -// snapshotFileInfo is a minimal implementation of os.FileInfo to support our -// virtual filesystem from snapshots. -type snapshotFileInfo struct { - name string - isDir bool -} - -var _ os.FileInfo = snapshotFileInfo{} - -func (fi snapshotFileInfo) Name() string { - return fi.name -} - -func (fi snapshotFileInfo) Size() int64 { - // In practice, our parser and loader never call Size - return -1 -} - -func (fi snapshotFileInfo) Mode() os.FileMode { - return os.ModePerm -} - -func (fi snapshotFileInfo) ModTime() time.Time { - return time.Now() -} - -func (fi snapshotFileInfo) IsDir() bool { - return fi.isDir -} - -func (fi snapshotFileInfo) Sys() interface{} { - return nil -} - -type snapshotFileStub struct{} - -func (f snapshotFileStub) Close() error { - return nil -} - -func (f snapshotFileStub) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("cannot read") -} - -func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { - return 0, fmt.Errorf("cannot read") -} - -func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { - return 0, fmt.Errorf("cannot seek") -} - -func (f snapshotFileStub) Write(p []byte) (n int, err error) { - return f.WriteAt(p, 0) -} - -func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { - return 0, fmt.Errorf("cannot write to file in snapshot") -} - -func (f snapshotFileStub) WriteString(s string) (n int, err error) { - return 0, fmt.Errorf("cannot write to file in snapshot") -} - -func (f snapshotFileStub) Name() string { - // in practice, the loader and parser never use this - return "" -} - -func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { - return nil, fmt.Errorf("cannot use Readdir on a file") -} - -func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { - return nil, fmt.Errorf("cannot use Readdir on a file") -} - -func (f snapshotFileStub) Stat() (os.FileInfo, error) { - return nil, fmt.Errorf("cannot stat") -} - -func (f snapshotFileStub) Sync() error { - return nil -} - -func (f snapshotFileStub) Truncate(size int64) error { - return fmt.Errorf("cannot write to file in snapshot") -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go deleted file mode 100644 index 16871e31..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go +++ /dev/null @@ -1,76 +0,0 @@ -package configload - -import ( - "os" - "path/filepath" - - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/registry" - "github.com/spf13/afero" -) - -type moduleMgr struct { - FS afero.Afero - - // CanInstall is true for a module manager that can support installation. - // - // This must be set only if FS is an afero.OsFs, because the installer - // (which uses go-getter) is not aware of the virtual filesystem - // abstraction and will always write into the "real" filesystem. 
- CanInstall bool - - // Dir is the path where descendent modules are (or will be) installed. - Dir string - - // Services is a service discovery client that will be used to find - // remote module registry endpoints. This object may be pre-loaded with - // cached discovery information. - Services *disco.Disco - - // Registry is a client for the module registry protocol, which is used - // when a module is requested from a registry source. - Registry *registry.Client - - // manifest tracks the currently-installed modules for this manager. - // - // The loader may read this. Only the installer may write to it, and - // after a set of updates are completed the installer must call - // writeModuleManifestSnapshot to persist a snapshot of the manifest - // to disk for use on subsequent runs. - manifest modsdir.Manifest -} - -func (m *moduleMgr) manifestSnapshotPath() string { - return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) -} - -// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. -func (m *moduleMgr) readModuleManifestSnapshot() error { - r, err := m.FS.Open(m.manifestSnapshotPath()) - if err != nil { - if os.IsNotExist(err) { - // We'll treat a missing file as an empty manifest - m.manifest = make(modsdir.Manifest) - return nil - } - return err - } - - m.manifest, err = modsdir.ReadManifestSnapshot(r) - return err -} - -// writeModuleManifestSnapshot writes a snapshot of the current manifest -// to the filesystem. -// -// The caller must guarantee no concurrent modifications of the manifest for -// the duration of a call to this function, or the behavior is undefined. -func (m *moduleMgr) writeModuleManifestSnapshot() error { - w, err := m.FS.Create(m.manifestSnapshotPath()) - if err != nil { - return err - } - - return m.manifest.WriteSnapshot(w) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go deleted file mode 100644 index 594cf640..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go +++ /dev/null @@ -1,45 +0,0 @@ -package configload - -import ( - "strings" - - "github.com/hashicorp/go-getter" - - "github.com/hashicorp/terraform/registry/regsrc" -) - -var localSourcePrefixes = []string{ - "./", - "../", - ".\\", - "..\\", -} - -func isLocalSourceAddr(addr string) bool { - for _, prefix := range localSourcePrefixes { - if strings.HasPrefix(addr, prefix) { - return true - } - } - return false -} - -func isRegistrySourceAddr(addr string) bool { - _, err := regsrc.ParseModuleSource(addr) - return err == nil -} - -// splitAddrSubdir splits the given address (which is assumed to be a -// registry address or go-getter-style address) into a package portion -// and a sub-directory portion. -// -// The package portion defines what should be downloaded and then the -// sub-directory portion, if present, specifies a sub-directory within -// the downloaded object (an archive, VCS repository, etc) that contains -// the module's configuration files. -// -// The subDir portion will be returned as empty if no subdir separator -// ("//") is present in the address. 
-func splitAddrSubdir(addr string) (packageAddr, subDir string) { - return getter.SourceDirSubdir(addr) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go deleted file mode 100644 index 86ca9d10..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go +++ /dev/null @@ -1,43 +0,0 @@ -package configload - -import ( - "io/ioutil" - "os" - "testing" -) - -// NewLoaderForTests is a variant of NewLoader that is intended to be more -// convenient for unit tests. -// -// The loader's modules directory is a separate temporary directory created -// for each call. Along with the created loader, this function returns a -// cleanup function that should be called before the test completes in order -// to remove that temporary directory. -// -// In the case of any errors, t.Fatal (or similar) will be called to halt -// execution of the test, so the calling test does not need to handle errors -// itself. -func NewLoaderForTests(t *testing.T) (*Loader, func()) { - t.Helper() - - modulesDir, err := ioutil.TempDir("", "tf-configs") - if err != nil { - t.Fatalf("failed to create temporary modules dir: %s", err) - return nil, func() {} - } - - cleanup := func() { - os.RemoveAll(modulesDir) - } - - loader, err := NewLoader(&Config{ - ModulesDir: modulesDir, - }) - if err != nil { - cleanup() - t.Fatalf("failed to create config loader: %s", err) - return nil, func() {} - } - - return loader, cleanup -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go deleted file mode 100644 index 2c21ca5e..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go +++ /dev/null @@ -1,123 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" -) - -var mapLabelNames = []string{"key"} - -// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body -// using the facilities in the hcldec package. -// -// The returned specification is guaranteed to return a value of the same type -// returned by method ImpliedType, but it may contain null values if any of the -// block attributes are defined as optional and/or computed respectively. -func (b *Block) DecoderSpec() hcldec.Spec { - ret := hcldec.ObjectSpec{} - if b == nil { - return ret - } - - for name, attrS := range b.Attributes { - ret[name] = attrS.decoderSpec(name) - } - - for name, blockS := range b.BlockTypes { - if _, exists := ret[name]; exists { - // This indicates an invalid schema, since it's not valid to - // define both an attribute and a block type of the same name. - // However, we don't raise this here since it's checked by - // InternalValidate. - continue - } - - childSpec := blockS.Block.DecoderSpec() - - // We can only validate 0 or 1 for MinItems, because a dynamic block - // may satisfy any number of min items while only having a single - // block in the config. 
We cannot validate MaxItems because a - // configuration may have any number of dynamic blocks - minItems := 0 - if blockS.MinItems > 1 { - minItems = 1 - } - - switch blockS.Nesting { - case NestingSingle, NestingGroup: - ret[name] = &hcldec.BlockSpec{ - TypeName: name, - Nested: childSpec, - Required: blockS.MinItems == 1, - } - if blockS.Nesting == NestingGroup { - ret[name] = &hcldec.DefaultSpec{ - Primary: ret[name], - Default: &hcldec.LiteralSpec{ - Value: blockS.EmptyValue(), - }, - } - } - case NestingList: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { - ret[name] = &hcldec.BlockTupleSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - } else { - ret[name] = &hcldec.BlockListSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - } - case NestingSet: - // We forbid dynamically-typed attributes inside NestingSet in - // InternalValidate, so we don't do anything special to handle - // that here. (There is no set analog to tuple and object types, - // because cty's set implementation depends on knowing the static - // type in order to properly compute its internal hashes.) - ret[name] = &hcldec.BlockSetSpec{ - TypeName: name, - Nested: childSpec, - MinItems: minItems, - } - case NestingMap: - // We prefer to use a list where possible, since it makes our - // implied type more complete, but if there are any - // dynamically-typed attributes inside we must use a tuple - // instead, at the expense of our type then not being predictable. - if blockS.Block.ImpliedType().HasDynamicTypes() { - ret[name] = &hcldec.BlockObjectSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - } else { - ret[name] = &hcldec.BlockMapSpec{ - TypeName: name, - Nested: childSpec, - LabelNames: mapLabelNames, - } - } - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. - continue - } - } - - return ret -} - -func (a *Attribute) decoderSpec(name string) hcldec.Spec { - return &hcldec.AttrSpec{ - Name: name, - Type: a.Type, - Required: a.Required, - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go deleted file mode 100644 index a81b7eab..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go +++ /dev/null @@ -1,42 +0,0 @@ -package configschema - -import ( - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty.Type that would result from decoding a -// configuration block using the receiving block schema. -// -// ImpliedType always returns a result, even if the given schema is -// inconsistent. Code that creates configschema.Block objects should be -// tested using the InternalValidate method to detect any inconsistencies -// that would cause this method to fall back on defaults and assumptions. -func (b *Block) ImpliedType() cty.Type { - if b == nil { - return cty.EmptyObject - } - - return hcldec.ImpliedType(b.DecoderSpec()) -} - -// ContainsSensitive returns true if any of the attributes of the receiving -// block or any of its descendent blocks are marked as sensitive. 
-// -// Blocks themselves cannot be sensitive as a whole -- sensitivity is a -// per-attribute idea -- but sometimes we want to include a whole object -// decoded from a block in some UI output, and that is safe to do only if -// none of the contained attributes are sensitive. -func (b *Block) ContainsSensitive() bool { - for _, attrS := range b.Attributes { - if attrS.Sensitive { - return true - } - } - for _, blockS := range b.BlockTypes { - if blockS.ContainsSensitive() { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go deleted file mode 100644 index ebf1abba..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go +++ /dev/null @@ -1,105 +0,0 @@ -package configschema - -import ( - "fmt" - "regexp" - - "github.com/zclconf/go-cty/cty" - - multierror "github.com/hashicorp/go-multierror" -) - -var validName = regexp.MustCompile(`^[a-z0-9_]+$`) - -// InternalValidate returns an error if the receiving block and its child -// schema definitions have any consistencies with the documented rules for -// valid schema. -// -// This is intended to be used within unit tests to detect when a given -// schema is invalid. -func (b *Block) InternalValidate() error { - if b == nil { - return fmt.Errorf("top-level block schema is nil") - } - return b.internalValidate("", nil) - -} - -func (b *Block) internalValidate(prefix string, err error) error { - for name, attrS := range b.Attributes { - if attrS == nil { - err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) - continue - } - if !validName.MatchString(name) { - err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) - } - if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { - err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) - } - if attrS.Optional && attrS.Required { - err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) - } - if attrS.Computed && attrS.Required { - err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) - } - if attrS.Type == cty.NilType { - err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) - } - } - - for name, blockS := range b.BlockTypes { - if blockS == nil { - err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) - continue - } - - if _, isAttr := b.Attributes[name]; isAttr { - err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) - } else if !validName.MatchString(name) { - err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) - } - - if blockS.MinItems < 0 || blockS.MaxItems < 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) - } - - switch blockS.Nesting { - case NestingSingle: - switch { - case blockS.MinItems != blockS.MaxItems: - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) - case blockS.MinItems < 0 || blockS.MinItems > 1: - err = multierror.Append(err, 
fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) - } - case NestingGroup: - if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) - } - case NestingList, NestingSet: - if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) - } - if blockS.Nesting == NestingSet { - ety := blockS.Block.ImpliedType() - if ety.HasDynamicTypes() { - // This is not permitted because the HCL (cty) set implementation - // needs to know the exact type of set elements in order to - // properly hash them, and so can't support mixed types. - err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) - } - } - case NestingMap: - if blockS.MinItems != 0 || blockS.MaxItems != 0 { - err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) - } - default: - err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) - } - - subPrefix := prefix + name + "." - err = blockS.Block.internalValidate(subPrefix, err) - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go deleted file mode 100644 index 0be3b8fa..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go +++ /dev/null @@ -1,38 +0,0 @@ -package configschema - -// NoneRequired returns a deep copy of the receiver with any required -// attributes translated to optional. -func (b *Block) NoneRequired() *Block { - ret := &Block{} - - if b.Attributes != nil { - ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) - } - for name, attrS := range b.Attributes { - ret.Attributes[name] = attrS.forceOptional() - } - - if b.BlockTypes != nil { - ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) - } - for name, blockS := range b.BlockTypes { - ret.BlockTypes[name] = blockS.noneRequired() - } - - return ret -} - -func (b *NestedBlock) noneRequired() *NestedBlock { - ret := *b - ret.Block = *(ret.Block.NoneRequired()) - ret.MinItems = 0 - ret.MaxItems = 0 - return &ret -} - -func (a *Attribute) forceOptional() *Attribute { - ret := *a - ret.Optional = true - ret.Required = false - return &ret -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go deleted file mode 100644 index f4702d36..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go +++ /dev/null @@ -1,130 +0,0 @@ -package configschema - -import ( - "github.com/zclconf/go-cty/cty" -) - -// Block represents a configuration block. -// -// "Block" here is a logical grouping construct, though it happens to map -// directly onto the physical block syntax of Terraform's native configuration -// syntax. It may be a more a matter of convention in other syntaxes, such as -// JSON. 
-// -// When converted to a value, a Block always becomes an instance of an object -// type derived from its defined attributes and nested blocks -type Block struct { - // Attributes describes any attributes that may appear directly inside - // the block. - Attributes map[string]*Attribute - - // BlockTypes describes any nested block types that may appear directly - // inside the block. - BlockTypes map[string]*NestedBlock -} - -// Attribute represents a configuration attribute, within a block. -type Attribute struct { - // Type is a type specification that the attribute's value must conform to. - Type cty.Type - - // Description is an English-language description of the purpose and - // usage of the attribute. A description should be concise and use only - // one or two sentences, leaving full definition to longer-form - // documentation defined elsewhere. - Description string - - // Required, if set to true, specifies that an omitted or null value is - // not permitted. - Required bool - - // Optional, if set to true, specifies that an omitted or null value is - // permitted. This field conflicts with Required. - Optional bool - - // Computed, if set to true, specifies that the value comes from the - // provider rather than from configuration. If combined with Optional, - // then the config may optionally provide an overridden value. - Computed bool - - // Sensitive, if set to true, indicates that an attribute may contain - // sensitive information. - // - // At present nothing is done with this information, but callers are - // encouraged to set it where appropriate so that it may be used in the - // future to help Terraform mask sensitive information. (Terraform - // currently achieves this in a limited sense via other mechanisms.) - Sensitive bool -} - -// NestedBlock represents the embedding of one block within another. -type NestedBlock struct { - // Block is the description of the block that's nested. - Block - - // Nesting provides the nesting mode for the child block, which determines - // how many instances of the block are allowed, how many labels it expects, - // and how the resulting data will be converted into a data structure. - Nesting NestingMode - - // MinItems and MaxItems set, for the NestingList and NestingSet nesting - // modes, lower and upper limits on the number of child blocks allowed - // of the given type. If both are left at zero, no limit is applied. - // - // As a special case, both values can be set to 1 for NestingSingle in - // order to indicate that a particular single block is required. - // - // These fields are ignored for other nesting modes and must both be left - // at zero. - MinItems, MaxItems int -} - -// NestingMode is an enumeration of modes for nesting blocks inside other -// blocks. -type NestingMode int - -//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode - -const ( - nestingModeInvalid NestingMode = iota - - // NestingSingle indicates that only a single instance of a given - // block type is permitted, with no labels, and its content should be - // provided directly as an object value. - NestingSingle - - // NestingGroup is similar to NestingSingle in that it calls for only a - // single instance of a given block type with no labels, but it additonally - // guarantees that its result will never be null, even if the block is - // absent, and instead the nested attributes and blocks will be treated - // as absent in that case. 
(Any required attributes or blocks within the - // nested block are not enforced unless the block is explicitly present - // in the configuration, so they are all effectively optional when the - // block is not present.) - // - // This is useful for the situation where a remote API has a feature that - // is always enabled but has a group of settings related to that feature - // that themselves have default values. By using NestingGroup instead of - // NestingSingle in that case, generated plans will show the block as - // present even when not present in configuration, thus allowing any - // default values within to be displayed to the user. - NestingGroup - - // NestingList indicates that multiple blocks of the given type are - // permitted, with no labels, and that their corresponding objects should - // be provided in a list. - NestingList - - // NestingSet indicates that multiple blocks of the given type are - // permitted, with no labels, and that their corresponding objects should - // be provided in a set. - NestingSet - - // NestingMap indicates that multiple blocks of the given type are - // permitted, each with a single label, and that their corresponding - // objects should be provided in a map whose keys are the labels. - // - // It's an error, therefore, to use the same label value on multiple - // blocks. - NestingMap -) diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go deleted file mode 100644 index 9fc2de38..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go +++ /dev/null @@ -1,173 +0,0 @@ -package configschema - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/helper/didyoumean" - "github.com/hashicorp/terraform/tfdiags" -) - -// StaticValidateTraversal checks whether the given traversal (which must be -// relative) refers to a construct in the receiving schema, returning error -// diagnostics if any problems are found. -// -// This method is "optimistic" in that it will not return errors for possible -// problems that cannot be detected statically. It is possible that an -// traversal which passed static validation will still fail when evaluated. -func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics { - if !traversal.IsRelative() { - panic("StaticValidateTraversal on absolute traversal") - } - if len(traversal) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - next := traversal[0] - after := traversal[1:] - - var name string - switch step := next.(type) { - case hcl.TraverseAttr: - name = step.Name - case hcl.TraverseIndex: - // No other traversal step types are allowed directly at a block. - // If it looks like the user was trying to use index syntax to - // access an attribute then we'll produce a specialized message. - key := step.Key - if key.Type() == cty.String && key.IsKnown() && !key.IsNull() { - maybeName := key.AsString() - if hclsyntax.ValidIdentifier(maybeName) { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: fmt.Sprintf(`Only attribute access is allowed here. 
Did you mean to access attribute %q using the dot operator?`, maybeName), - Subject: &step.SrcRange, - }) - return diags - } - } - // If it looks like some other kind of index then we'll use a generic error. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid index operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: &step.SrcRange, - }) - return diags - default: - // No other traversal types should appear in a normal valid traversal, - // but we'll handle this with a generic error anyway to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: `Only attribute access is allowed here, using the dot operator.`, - Subject: next.SourceRange().Ptr(), - }) - return diags - } - - if attrS, exists := b.Attributes[name]; exists { - // For attribute validation we will just apply the rest of the - // traversal to an unknown value of the attribute type and pass - // through HCL's own errors, since we don't want to replicate all of - // HCL's type checking rules here. - val := cty.UnknownVal(attrS.Type) - _, hclDiags := after.TraverseRel(val) - diags = diags.Append(hclDiags) - return diags - } - - if blockS, exists := b.BlockTypes[name]; exists { - moreDiags := blockS.staticValidateTraversal(name, after) - diags = diags.Append(moreDiags) - return diags - } - - // If we get here then the name isn't valid at all. We'll collect up - // all of the names that _are_ valid to use as suggestions. - var suggestions []string - for name := range b.Attributes { - suggestions = append(suggestions, name) - } - for name := range b.BlockTypes { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unsupported attribute`, - Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), - Subject: next.SourceRange().Ptr(), - }) - - return diags -} - -func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { - if b.Nesting == NestingSingle || b.Nesting == NestingGroup { - // Single blocks are easy: just pass right through. - return b.Block.StaticValidateTraversal(traversal) - } - - if len(traversal) == 0 { - // It's always valid to access a nested block's attribute directly. - return nil - } - - var diags tfdiags.Diagnostics - next := traversal[0] - after := traversal[1:] - - switch b.Nesting { - - case NestingSet: - // Can't traverse into a set at all, since it does not have any keys - // to index with. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Cannot index a set value`, - Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), - Subject: next.SourceRange().Ptr(), - }) - return diags - - case NestingList: - if _, ok := next.(hcl.TraverseIndex); ok { - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - } else { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid operation`, - Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), - Subject: next.SourceRange().Ptr(), - }) - } - return diags - - case NestingMap: - // Both attribute and index steps are valid for maps, so we'll just - // pass through here and let normal evaluation catch an - // incorrectly-typed index key later, if present. - moreDiags := b.Block.StaticValidateTraversal(after) - diags = diags.Append(moreDiags) - return diags - - default: - // Invalid nesting type is just ignored. It's checked by - // InternalValidate. (Note that we handled NestingSingle separately - // back at the start of this function.) - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go deleted file mode 100644 index 036c2d6c..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/depends_on.go +++ /dev/null @@ -1,23 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { - var ret []hcl.Traversal - exprs, diags := hcl.ExprList(attr.Expr) - - for _, expr := range exprs { - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - ret = append(ret, traversal) - } - } - - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go deleted file mode 100644 index f01eb79f..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package configs contains types that represent Terraform configurations and -// the different elements thereof. -// -// The functionality in this package can be used for some static analyses of -// Terraform configurations, but this package generally exposes representations -// of the configuration source code rather than the result of evaluating these -// objects. The sibling package "lang" deals with evaluation of structures -// and expressions in the configuration. -// -// Due to its close relationship with HCL, this package makes frequent use -// of types from the HCL API, including raw HCL diagnostic messages. Such -// diagnostics can be converted into Terraform-flavored diagnostics, if needed, -// using functions in the sibling package tfdiags. -// -// The Parser type is the main entry-point into this package. The LoadConfigDir -// method can be used to load a single module directory, and then a full -// configuration (including any descendent modules) can be produced using -// the top-level BuildConfig method. 
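As a minimal sketch of that entry point against the vendored configs package being removed in this diff: load one module directory as the root module, then build the configuration tree using the DisabledModuleWalker defined earlier, which rejects child module calls. The directory path is illustrative, and configurations containing "module" blocks would need a real module walker instead:

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform/configs"
	"github.com/spf13/afero"
)

func main() {
	// Parse the Terraform configuration in the current directory as a root
	// module, then build the (single-module) configuration tree.
	parser := configs.NewParser(afero.NewOsFs())

	mod, diags := parser.LoadConfigDir(".")
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}

	cfg, diags := configs.BuildConfig(mod, configs.DisabledModuleWalker)
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}
	log.Printf("root module declares %d managed resources", len(cfg.Module.ManagedResources))
}
```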
-package configs diff --git a/vendor/github.com/hashicorp/terraform/configs/experiments.go b/vendor/github.com/hashicorp/terraform/configs/experiments.go deleted file mode 100644 index 435bac11..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/experiments.go +++ /dev/null @@ -1,156 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/experiments" -) - -// sniffActiveExperiments does minimal parsing of the given body for -// "terraform" blocks with "experiments" attributes, returning the -// experiments found. -// -// This is separate from other processing so that we can be sure that all of -// the experiments are known before we process the result of the module config, -// and thus we can take into account which experiments are active when deciding -// how to decode. -func sniffActiveExperiments(body hcl.Body) (experiments.Set, hcl.Diagnostics) { - rootContent, _, diags := body.PartialContent(configFileTerraformBlockSniffRootSchema) - - ret := experiments.NewSet() - - for _, block := range rootContent.Blocks { - content, _, blockDiags := block.Body.PartialContent(configFileExperimentsSniffBlockSchema) - diags = append(diags, blockDiags...) - - attr, exists := content.Attributes["experiments"] - if !exists { - continue - } - - exps, expDiags := decodeExperimentsAttr(attr) - diags = append(diags, expDiags...) - if !expDiags.HasErrors() { - ret = experiments.SetUnion(ret, exps) - } - } - - return ret, diags -} - -func decodeExperimentsAttr(attr *hcl.Attribute) (experiments.Set, hcl.Diagnostics) { - var diags hcl.Diagnostics - - exprs, moreDiags := hcl.ExprList(attr.Expr) - diags = append(diags, moreDiags...) - if moreDiags.HasErrors() { - return nil, diags - } - - var ret = experiments.NewSet() - for _, expr := range exprs { - kw := hcl.ExprAsKeyword(expr) - if kw == "" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid experiment keyword", - Detail: "Elements of \"experiments\" must all be keywords representing active experiments.", - Subject: expr.Range().Ptr(), - }) - continue - } - - exp, err := experiments.GetCurrent(kw) - switch err := err.(type) { - case experiments.UnavailableError: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unknown experiment keyword", - Detail: fmt.Sprintf("There is no current experiment with the keyword %q.", kw), - Subject: expr.Range().Ptr(), - }) - case experiments.ConcludedError: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Experiment has concluded", - Detail: fmt.Sprintf("Experiment %q is no longer available. %s", kw, err.Message), - Subject: expr.Range().Ptr(), - }) - case nil: - // No error at all means it's valid and current. - ret.Add(exp) - - // However, experimental features are subject to breaking changes - // in future releases, so we'll warn about them to help make sure - // folks aren't inadvertently using them in places where that'd be - // inappropriate, particularly if the experiment is active in a - // shared module they depend on. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: fmt.Sprintf("Experimental feature %q is active", exp.Keyword()), - Detail: "Experimental features are subject to breaking changes in future minor or patch releases, based on feedback.\n\nIf you have feedback on the design of this feature, please open a GitHub issue to discuss it.", - Subject: expr.Range().Ptr(), - }) - - default: - // This should never happen, because GetCurrent is not documented - // to return any other error type, but we'll handle it to be robust. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid experiment keyword", - Detail: fmt.Sprintf("Could not parse %q as an experiment keyword: %s.", kw, err.Error()), - Subject: expr.Range().Ptr(), - }) - } - } - return ret, diags -} - -func checkModuleExperiments(m *Module) hcl.Diagnostics { - var diags hcl.Diagnostics - - // When we have current experiments, this is a good place to check that - // the features in question can only be used when the experiments are - // active. Return error diagnostics if a feature is being used without - // opting in to the feature. For example: - /* - if !m.ActiveExperiments.Has(experiments.ResourceForEach) { - for _, rc := range m.ManagedResources { - if rc.ForEach != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Resource for_each is experimental", - Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding resource_for_each to the list of active experiments.", - Subject: rc.ForEach.Range().Ptr(), - }) - } - } - for _, rc := range m.DataResources { - if rc.ForEach != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Resource for_each is experimental", - Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding resource_for_each to the list of active experiments.", - Subject: rc.ForEach.Range().Ptr(), - }) - } - } - } - */ - - if !m.ActiveExperiments.Has(experiments.VariableValidation) { - for _, vc := range m.Variables { - if len(vc.Validations) != 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Custom variable validation is experimental", - Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding variable_validation to the list of active experiments.", - Subject: vc.Validations[0].DeclRange.Ptr(), - }) - } - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go deleted file mode 100644 index 3403c026..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/paths.go +++ /dev/null @@ -1,276 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -// RequiresReplace takes a list of flatmapped paths from a -// InstanceDiff.Attributes along with the corresponding cty.Type, and returns -// the list of the cty.Paths that are flagged as causing the resource -// replacement (RequiresNew). -// This will filter out redundant paths, paths that refer to flatmapped indexes -// (e.g. "#", "%"), and will return any changes within a set as the path to the -// set itself. 
-func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { - var paths []cty.Path - - for _, attr := range attrs { - p, err := requiresReplacePath(attr, ty) - if err != nil { - return nil, err - } - - paths = append(paths, p) - } - - // now trim off any trailing paths that aren't GetAttrSteps, since only an - // attribute itself can require replacement - paths = trimPaths(paths) - - // There may be redundant paths due to set elements or index attributes - // Do some ugly n^2 filtering, but these are always fairly small sets. - for i := 0; i < len(paths)-1; i++ { - for j := i + 1; j < len(paths); j++ { - if reflect.DeepEqual(paths[i], paths[j]) { - // swap the tail and slice it off - paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] - paths = paths[:len(paths)-1] - j-- - } - } - } - - return paths, nil -} - -// trimPaths removes any trailing steps that aren't of type GetAttrSet, since -// only an attribute itself can require replacement -func trimPaths(paths []cty.Path) []cty.Path { - var trimmed []cty.Path - for _, path := range paths { - path = trimPath(path) - if len(path) > 0 { - trimmed = append(trimmed, path) - } - } - return trimmed -} - -func trimPath(path cty.Path) cty.Path { - for len(path) > 0 { - _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) - if isGetAttr { - break - } - path = path[:len(path)-1] - } - return path -} - -// requiresReplacePath takes a key from a flatmap along with the cty.Type -// describing the structure, and returns the cty.Path that would be used to -// reference the nested value in the data structure. -// This is used specifically to record the RequiresReplace attributes from a -// ResourceInstanceDiff. -func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { - if k == "" { - return nil, nil - } - if !ty.IsObjectType() { - panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) - } - - path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) - if err != nil { - return path, fmt.Errorf("[%s] %s", k, err) - } - return path, nil -} - -func pathSplit(p string) (string, string) { - parts := strings.SplitN(p, ".", 2) - head := parts[0] - rest := "" - if len(parts) > 1 { - rest = parts[1] - } - return head, rest -} - -func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { - k, rest := pathSplit(key) - - path := cty.Path{cty.GetAttrStep{Name: k}} - - ty, ok := atys[k] - if !ok { - return path, fmt.Errorf("attribute %q not found", k) - } - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - switch { - case ty.IsPrimitiveType(): - err = fmt.Errorf("invalid step %q with type %#v", key, ty) - case ty.IsObjectType(): - path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) - case ty.IsTupleType(): - path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) - case ty.IsMapType(): - path, err = pathFromFlatmapKeyMap(key, ty) - case ty.IsListType(): - path, err = pathFromFlatmapKeyList(key, ty) - case ty.IsSetType(): - path, err = pathFromFlatmapKeySet(key, ty) - default: - err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) - } - - if err != nil { - return path, err - } - - return path, nil -} - -func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := 
pathSplit(key) - - // we don't need to convert the index keys to paths - if k == "#" { - return path, nil - } - - idx, err := strconv.Atoi(k) - if err != nil { - return path, err - } - - path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} - - if idx >= len(etys) { - return path, fmt.Errorf("index %s out of range in %#v", key, etys) - } - - if rest == "" { - return path, nil - } - - ty := etys[idx] - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := key, "" - if !ty.ElementType().IsPrimitiveType() { - k, rest = pathSplit(key) - } - - // we don't need to convert the index keys to paths - if k == "%" { - return path, nil - } - - path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { - var path cty.Path - var err error - - k, rest := pathSplit(key) - - // we don't need to convert the index keys to paths - if key == "#" { - return path, nil - } - - idx, err := strconv.Atoi(k) - if err != nil { - return path, err - } - - path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} - - if rest == "" { - return path, nil - } - - p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) - if err != nil { - return path, err - } - - return append(path, p...), nil -} - -func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { - // once we hit a set, we can't return consistent paths, so just mark the - // set as a whole changed. - return nil, nil -} - -// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for -// use in generating legacy style diffs. -func FlatmapKeyFromPath(path cty.Path) string { - var parts []string - - for _, step := range path { - switch step := step.(type) { - case cty.GetAttrStep: - parts = append(parts, step.Name) - case cty.IndexStep: - switch ty := step.Key.Type(); { - case ty == cty.String: - parts = append(parts, step.Key.AsString()) - case ty == cty.Number: - i, _ := step.Key.AsBigFloat().Int64() - parts = append(parts, strconv.Itoa(int(i))) - } - } - } - - return strings.Join(parts, ".") -} diff --git a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go deleted file mode 100644 index daeb0b8e..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/hcl2shim/values.go +++ /dev/null @@ -1,353 +0,0 @@ -package hcl2shim - -import ( - "fmt" - "math/big" - - "github.com/hashicorp/hil/ast" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// UnknownVariableValue is a sentinel value that can be used -// to denote that the value of a variable is unknown at this time. -// RawConfig uses this information to build up data about -// unknown keys. -const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for -// known object values and uses the provided block schema to perform some -// additional normalization to better mimic the shape of value that the old -// HCL1/HIL-based codepaths would've produced. 
-// -// In particular, it discards the collections that we use to represent nested -// blocks (other than NestingSingle) if they are empty, which better mimics -// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't -// know that an unspecified block _could_ exist. -// -// The given object value must conform to the schema's implied type or this -// function will panic or produce incorrect results. -// -// This is primarily useful for the final transition from new-style values to -// terraform.ResourceConfig before calling to a legacy provider, since -// helper/schema (the old provider SDK) is particularly sensitive to these -// subtle differences within its validation code. -func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { - if v.IsNull() { - return nil - } - if !v.IsKnown() { - panic("ConfigValueFromHCL2Block used with unknown value") - } - if !v.Type().IsObjectType() { - panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) - } - - atys := v.Type().AttributeTypes() - ret := make(map[string]interface{}) - - for name := range schema.Attributes { - if _, exists := atys[name]; !exists { - continue - } - - av := v.GetAttr(name) - if av.IsNull() { - // Skip nulls altogether, to better mimic how HCL1 would behave - continue - } - ret[name] = ConfigValueFromHCL2(av) - } - - for name, blockS := range schema.BlockTypes { - if _, exists := atys[name]; !exists { - continue - } - bv := v.GetAttr(name) - if !bv.IsKnown() { - ret[name] = UnknownVariableValue - continue - } - if bv.IsNull() { - continue - } - - switch blockS.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) - - case configschema.NestingList, configschema.NestingSet: - l := bv.LengthInt() - if l == 0 { - // skip empty collections to better mimic how HCL1 would behave - continue - } - - elems := make([]interface{}, 0, l) - for it := bv.ElementIterator(); it.Next(); { - _, ev := it.Element() - if !ev.IsKnown() { - elems = append(elems, UnknownVariableValue) - continue - } - elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) - } - ret[name] = elems - - case configschema.NestingMap: - if bv.LengthInt() == 0 { - // skip empty collections to better mimic how HCL1 would behave - continue - } - - elems := make(map[string]interface{}) - for it := bv.ElementIterator(); it.Next(); { - ek, ev := it.Element() - if !ev.IsKnown() { - elems[ek.AsString()] = UnknownVariableValue - continue - } - elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) - } - ret[name] = elems - } - } - - return ret -} - -// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic -// types library that HCL2 uses) to a value type that matches what would've -// been produced from the HCL-based interpolator for an equivalent structure. -// -// This function will transform a cty null value into a Go nil value, which -// isn't a possible outcome of the HCL/HIL-based decoder and so callers may -// need to detect and reject any null values. 
-func ConfigValueFromHCL2(v cty.Value) interface{} { - if !v.IsKnown() { - return UnknownVariableValue - } - if v.IsNull() { - return nil - } - - switch v.Type() { - case cty.Bool: - return v.True() // like HCL.BOOL - case cty.String: - return v.AsString() // like HCL token.STRING or token.HEREDOC - case cty.Number: - // We can't match HCL _exactly_ here because it distinguishes between - // int and float values, but we'll get as close as we can by using - // an int if the number is exactly representable, and a float if not. - // The conversion to float will force precision to that of a float64, - // which is potentially losing information from the specific number - // given, but no worse than what HCL would've done in its own conversion - // to float. - - f := v.AsBigFloat() - if i, acc := f.Int64(); acc == big.Exact { - // if we're on a 32-bit system and the number is too big for 32-bit - // int then we'll fall through here and use a float64. - const MaxInt = int(^uint(0) >> 1) - const MinInt = -MaxInt - 1 - if i <= int64(MaxInt) && i >= int64(MinInt) { - return int(i) // Like HCL token.NUMBER - } - } - - f64, _ := f.Float64() - return f64 // like HCL token.FLOAT - } - - if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { - l := make([]interface{}, 0, v.LengthInt()) - it := v.ElementIterator() - for it.Next() { - _, ev := it.Element() - l = append(l, ConfigValueFromHCL2(ev)) - } - return l - } - - if v.Type().IsMapType() || v.Type().IsObjectType() { - l := make(map[string]interface{}) - it := v.ElementIterator() - for it.Next() { - ek, ev := it.Element() - cv := ConfigValueFromHCL2(ev) - if cv != nil { - l[ek.AsString()] = cv - } - } - return l - } - - // If we fall out here then we have some weird type that we haven't - // accounted for. This should never happen unless the caller is using - // capsule types, and we don't currently have any such types defined. - panic(fmt.Errorf("can't convert %#v to config value", v)) -} - -// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes -// a value as would be returned from the old interpolator and turns it into -// a cty.Value so it can be used within, for example, an HCL2 EvalContext. -func HCL2ValueFromConfigValue(v interface{}) cty.Value { - if v == nil { - return cty.NullVal(cty.DynamicPseudoType) - } - if v == UnknownVariableValue { - return cty.DynamicVal - } - - switch tv := v.(type) { - case bool: - return cty.BoolVal(tv) - case string: - return cty.StringVal(tv) - case int: - return cty.NumberIntVal(int64(tv)) - case float64: - return cty.NumberFloatVal(tv) - case []interface{}: - vals := make([]cty.Value, len(tv)) - for i, ev := range tv { - vals[i] = HCL2ValueFromConfigValue(ev) - } - return cty.TupleVal(vals) - case map[string]interface{}: - vals := map[string]cty.Value{} - for k, ev := range tv { - vals[k] = HCL2ValueFromConfigValue(ev) - } - return cty.ObjectVal(vals) - default: - // HCL/HIL should never generate anything that isn't caught by - // the above, so if we get here something has gone very wrong. 
- panic(fmt.Errorf("can't convert %#v to cty.Value", v)) - } -} - -func HILVariableFromHCL2Value(v cty.Value) ast.Variable { - if v.IsNull() { - // Caller should guarantee/check this before calling - panic("Null values cannot be represented in HIL") - } - if !v.IsKnown() { - return ast.Variable{ - Type: ast.TypeUnknown, - Value: UnknownVariableValue, - } - } - - switch v.Type() { - case cty.Bool: - return ast.Variable{ - Type: ast.TypeBool, - Value: v.True(), - } - case cty.Number: - v := ConfigValueFromHCL2(v) - switch tv := v.(type) { - case int: - return ast.Variable{ - Type: ast.TypeInt, - Value: tv, - } - case float64: - return ast.Variable{ - Type: ast.TypeFloat, - Value: tv, - } - default: - // should never happen - panic("invalid return value for configValueFromHCL2") - } - case cty.String: - return ast.Variable{ - Type: ast.TypeString, - Value: v.AsString(), - } - } - - if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { - l := make([]ast.Variable, 0, v.LengthInt()) - it := v.ElementIterator() - for it.Next() { - _, ev := it.Element() - l = append(l, HILVariableFromHCL2Value(ev)) - } - // If we were given a tuple then this could actually produce an invalid - // list with non-homogenous types, which we expect to be caught inside - // HIL just like a user-supplied non-homogenous list would be. - return ast.Variable{ - Type: ast.TypeList, - Value: l, - } - } - - if v.Type().IsMapType() || v.Type().IsObjectType() { - l := make(map[string]ast.Variable) - it := v.ElementIterator() - for it.Next() { - ek, ev := it.Element() - l[ek.AsString()] = HILVariableFromHCL2Value(ev) - } - // If we were given an object then this could actually produce an invalid - // map with non-homogenous types, which we expect to be caught inside - // HIL just like a user-supplied non-homogenous map would be. - return ast.Variable{ - Type: ast.TypeMap, - Value: l, - } - } - - // If we fall out here then we have some weird type that we haven't - // accounted for. This should never happen unless the caller is using - // capsule types, and we don't currently have any such types defined. 
- panic(fmt.Errorf("can't convert %#v to HIL variable", v)) -} - -func HCL2ValueFromHILVariable(v ast.Variable) cty.Value { - switch v.Type { - case ast.TypeList: - vals := make([]cty.Value, len(v.Value.([]ast.Variable))) - for i, ev := range v.Value.([]ast.Variable) { - vals[i] = HCL2ValueFromHILVariable(ev) - } - return cty.TupleVal(vals) - case ast.TypeMap: - vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable))) - for k, ev := range v.Value.(map[string]ast.Variable) { - vals[k] = HCL2ValueFromHILVariable(ev) - } - return cty.ObjectVal(vals) - default: - return HCL2ValueFromConfigValue(v.Value) - } -} - -func HCL2TypeForHILType(hilType ast.Type) cty.Type { - switch hilType { - case ast.TypeAny: - return cty.DynamicPseudoType - case ast.TypeUnknown: - return cty.DynamicPseudoType - case ast.TypeBool: - return cty.Bool - case ast.TypeInt: - return cty.Number - case ast.TypeFloat: - return cty.Number - case ast.TypeString: - return cty.String - case ast.TypeList: - return cty.List(cty.DynamicPseudoType) - case ast.TypeMap: - return cty.Map(cty.DynamicPseudoType) - default: - return cty.NilType // equilvalent to ast.TypeInvalid - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go deleted file mode 100644 index 010dc9ae..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module.go +++ /dev/null @@ -1,427 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/experiments" -) - -// Module is a container for a set of configuration constructs that are -// evaluated within a common namespace. -type Module struct { - // SourceDir is the filesystem directory that the module was loaded from. - // - // This is populated automatically only for configurations loaded with - // LoadConfigDir. If the parser is using a virtual filesystem then the - // path here will be in terms of that virtual filesystem. - - // Any other caller that constructs a module directly with NewModule may - // assign a suitable value to this attribute before using it for other - // purposes. It should be treated as immutable by all consumers of Module - // values. - SourceDir string - - CoreVersionConstraints []VersionConstraint - - ActiveExperiments experiments.Set - - Backend *Backend - ProviderConfigs map[string]*Provider - ProviderRequirements map[string]ProviderRequirements - - Variables map[string]*Variable - Locals map[string]*Local - Outputs map[string]*Output - - ModuleCalls map[string]*ModuleCall - - ManagedResources map[string]*Resource - DataResources map[string]*Resource -} - -// File describes the contents of a single configuration file. -// -// Individual files are not usually used alone, but rather combined together -// with other files (conventionally, those in the same directory) to produce -// a *Module, using NewModule. -// -// At the level of an individual file we represent directly the structural -// elements present in the file, without any attempt to detect conflicting -// declarations. A File object can therefore be used for some basic static -// analysis of individual elements, but must be built into a Module to detect -// duplicate declarations. 
-type File struct { - CoreVersionConstraints []VersionConstraint - - ActiveExperiments experiments.Set - - Backends []*Backend - ProviderConfigs []*Provider - RequiredProviders []*RequiredProvider - - Variables []*Variable - Locals []*Local - Outputs []*Output - - ModuleCalls []*ModuleCall - - ManagedResources []*Resource - DataResources []*Resource -} - -// NewModule takes a list of primary files and a list of override files and -// produces a *Module by combining the files together. -// -// If there are any conflicting declarations in the given files -- for example, -// if the same variable name is defined twice -- then the resulting module -// will be incomplete and error diagnostics will be returned. Careful static -// analysis of the returned Module is still possible in this case, but the -// module will probably not be semantically valid. -func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { - var diags hcl.Diagnostics - mod := &Module{ - ProviderConfigs: map[string]*Provider{}, - ProviderRequirements: map[string]ProviderRequirements{}, - Variables: map[string]*Variable{}, - Locals: map[string]*Local{}, - Outputs: map[string]*Output{}, - ModuleCalls: map[string]*ModuleCall{}, - ManagedResources: map[string]*Resource{}, - DataResources: map[string]*Resource{}, - } - - for _, file := range primaryFiles { - fileDiags := mod.appendFile(file) - diags = append(diags, fileDiags...) - } - - for _, file := range overrideFiles { - fileDiags := mod.mergeFile(file) - diags = append(diags, fileDiags...) - } - - diags = append(diags, checkModuleExperiments(mod)...) - - return mod, diags -} - -// ResourceByAddr returns the configuration for the resource with the given -// address, or nil if there is no such resource. -func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { - key := addr.String() - switch addr.Mode { - case addrs.ManagedResourceMode: - return m.ManagedResources[key] - case addrs.DataResourceMode: - return m.DataResources[key] - default: - return nil - } -} - -func (m *Module) appendFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - for _, constraint := range file.CoreVersionConstraints { - // If there are any conflicting requirements then we'll catch them - // when we actually check these constraints. - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - - m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments) - - for _, b := range file.Backends { - if m.Backend != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), - Subject: &b.DeclRange, - }) - continue - } - m.Backend = b - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - if existing, exists := m.ProviderConfigs[key]; exists { - if existing.Alias == "" { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. 
If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } else { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider configuration", - Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), - Subject: &pc.DeclRange, - }) - } - continue - } - m.ProviderConfigs[key] = pc - } - - for _, reqd := range file.RequiredProviders { - // TODO: once the remaining provider source functionality is - // implemented, get addrs.Provider from source if set, or - // addrs.NewDefaultProvider(name) if not - if reqd.Source != "" { - panic("source is not yet supported") - } - fqn := addrs.NewLegacyProvider(reqd.Name) - if existing, exists := m.ProviderRequirements[reqd.Name]; exists { - if existing.Type != fqn { - panic("provider fqn mismatch") - } - existing.VersionConstraints = append(existing.VersionConstraints, reqd.Requirement) - } else { - m.ProviderRequirements[reqd.Name] = ProviderRequirements{Type: fqn, VersionConstraints: []VersionConstraint{reqd.Requirement}} - } - } - - for _, v := range file.Variables { - if existing, exists := m.Variables[v.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate variable declaration", - Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &v.DeclRange, - }) - } - m.Variables[v.Name] = v - } - - for _, l := range file.Locals { - if existing, exists := m.Locals[l.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate local value definition", - Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &l.DeclRange, - }) - } - m.Locals[l.Name] = l - } - - for _, o := range file.Outputs { - if existing, exists := m.Outputs[o.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate output definition", - Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), - Subject: &o.DeclRange, - }) - } - m.Outputs[o.Name] = o - } - - for _, mc := range file.ModuleCalls { - if existing, exists := m.ModuleCalls[mc.Name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate module call", - Detail: fmt.Sprintf("An module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), - Subject: &mc.DeclRange, - }) - } - m.ModuleCalls[mc.Name] = mc - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - if existing, exists := m.ManagedResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. 
Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.ManagedResources[key] = r - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - if existing, exists := m.DataResources[key]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), - Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), - Subject: &r.DeclRange, - }) - continue - } - m.DataResources[key] = r - } - - return diags -} - -func (m *Module) mergeFile(file *File) hcl.Diagnostics { - var diags hcl.Diagnostics - - if len(file.CoreVersionConstraints) != 0 { - // This is a bit of a strange case for overriding since we normally - // would union together across multiple files anyway, but we'll - // allow it and have each override file clobber any existing list. - m.CoreVersionConstraints = nil - for _, constraint := range file.CoreVersionConstraints { - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } - } - - if len(file.Backends) != 0 { - switch len(file.Backends) { - case 1: - m.Backend = file.Backends[0] - default: - // An override file with multiple backends is still invalid, even - // though it can override backends from _other_ files. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate backend configuration", - Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), - Subject: &file.Backends[1].DeclRange, - }) - } - } - - for _, pc := range file.ProviderConfigs { - key := pc.moduleUniqueKey() - existing, exists := m.ProviderConfigs[key] - if pc.Alias == "" { - // We allow overriding a non-existing _default_ provider configuration - // because the user model is that an absent provider configuration - // implies an empty provider configuration, which is what the user - // is therefore overriding here. - if exists { - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } else { - m.ProviderConfigs[key] = pc - } - } else { - // For aliased providers, there must be a base configuration to - // override. This allows us to detect and report alias typos - // that might otherwise cause the override to not apply. - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base provider configuration for override", - Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), - Subject: &pc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(pc) - diags = append(diags, mergeDiags...) - } - } - - if len(file.RequiredProviders) != 0 { - mergeProviderVersionConstraints(m.ProviderRequirements, file.RequiredProviders) - } - - for _, v := range file.Variables { - existing, exists := m.Variables[v.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base variable declaration to override", - Detail: fmt.Sprintf("There is no variable named %q. 
An override file can only override a variable that was already declared in a primary configuration file.", v.Name), - Subject: &v.DeclRange, - }) - continue - } - mergeDiags := existing.merge(v) - diags = append(diags, mergeDiags...) - } - - for _, l := range file.Locals { - existing, exists := m.Locals[l.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base local value definition to override", - Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), - Subject: &l.DeclRange, - }) - continue - } - mergeDiags := existing.merge(l) - diags = append(diags, mergeDiags...) - } - - for _, o := range file.Outputs { - existing, exists := m.Outputs[o.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing base output definition to override", - Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), - Subject: &o.DeclRange, - }) - continue - } - mergeDiags := existing.merge(o) - diags = append(diags, mergeDiags...) - } - - for _, mc := range file.ModuleCalls { - existing, exists := m.ModuleCalls[mc.Name] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing module call to override", - Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), - Subject: &mc.DeclRange, - }) - continue - } - mergeDiags := existing.merge(mc) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.ManagedResources { - key := r.moduleUniqueKey() - existing, exists := m.ManagedResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing resource to override", - Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r) - diags = append(diags, mergeDiags...) - } - - for _, r := range file.DataResources { - key := r.moduleUniqueKey() - existing, exists := m.DataResources[key] - if !exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing data resource to override", - Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), - Subject: &r.DeclRange, - }) - continue - } - mergeDiags := existing.merge(r) - diags = append(diags, mergeDiags...) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go deleted file mode 100644 index a484ffef..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module_call.go +++ /dev/null @@ -1,188 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// ModuleCall represents a "module" block in a module or file. 
-type ModuleCall struct { - Name string - - SourceAddr string - SourceAddrRange hcl.Range - SourceSet bool - - Config hcl.Body - - Version VersionConstraint - - Count hcl.Expression - ForEach hcl.Expression - - Providers []PassedProviderConfig - - DependsOn []hcl.Traversal - - DeclRange hcl.Range -} - -func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { - mc := &ModuleCall{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := moduleBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, remain, diags := block.Body.PartialContent(schema) - mc.Config = remain - - if !hclsyntax.ValidIdentifier(mc.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module instance name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["source"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) - diags = append(diags, valDiags...) - mc.SourceAddrRange = attr.Expr.Range() - mc.SourceSet = true - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - mc.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - if attr, exists := content.Attributes["count"]; exists { - mc.Count = attr.Expr - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["for_each"]; exists { - mc.ForEach = attr.Expr - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - mc.DependsOn = append(mc.DependsOn, deps...) - - // We currently parse this, but don't yet do anything with it. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in module block", - Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), - Subject: &attr.NameRange, - }) - } - - if attr, exists := content.Attributes["providers"]; exists { - seen := make(map[string]hcl.Range) - pairs, pDiags := hcl.ExprMap(attr.Expr) - diags = append(diags, pDiags...) - for _, pair := range pairs { - key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") - diags = append(diags, keyDiags...) - value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") - diags = append(diags, valueDiags...) - if keyDiags.HasErrors() || valueDiags.HasErrors() { - continue - } - - matchKey := key.String() - if prev, exists := seen[matchKey]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate provider address", - Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. 
Each child provider configuration can be assigned only once.", matchKey, prev), - Subject: pair.Value.Range().Ptr(), - }) - continue - } - - rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) - seen[matchKey] = rng - mc.Providers = append(mc.Providers, PassedProviderConfig{ - InChild: key, - InParent: value, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in module block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return mc, diags -} - -// PassedProviderConfig represents a provider config explicitly passed down to -// a child module, possibly giving it a new local address in the process. -type PassedProviderConfig struct { - InChild *ProviderConfigRef - InParent *ProviderConfigRef -} - -var moduleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "source", - Required: true, - }, - { - Name: "version", - }, - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "depends_on", - }, - { - Name: "providers", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - // These are all reserved for future use. - {Type: "lifecycle"}, - {Type: "locals"}, - {Type: "provider", LabelNames: []string{"type"}}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go deleted file mode 100644 index 6ab19c45..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module_merge.go +++ /dev/null @@ -1,248 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// The methods in this file are used by Module.mergeFile to apply overrides -// to our different configuration elements. These methods all follow the -// pattern of mutating the receiver to incorporate settings from the parameter, -// returning error diagnostics if any aspect of the parameter cannot be merged -// into the receiver for some reason. -// -// User expectation is that anything _explicitly_ set in the given object -// should take precedence over the corresponding settings in the receiver, -// but that anything omitted in the given object should be left unchanged. -// In some cases it may be reasonable to do a "deep merge" of certain nested -// features, if it is possible to unambiguously correlate the nested elements -// and their behaviors are orthogonal to each other. - -func (p *Provider) merge(op *Provider) hcl.Diagnostics { - var diags hcl.Diagnostics - - if op.Version.Required != nil { - p.Version = op.Version - } - - p.Config = MergeBodies(p.Config, op.Config) - - return diags -} - -func mergeProviderVersionConstraints(recv map[string]ProviderRequirements, ovrd []*RequiredProvider) { - // Any provider name that's mentioned in the override gets nilled out in - // our map so that we'll rebuild it below. Any provider not mentioned is - // left unchanged. 
- for _, reqd := range ovrd { - delete(recv, reqd.Name) - } - for _, reqd := range ovrd { - fqn := addrs.NewLegacyProvider(reqd.Name) - recv[reqd.Name] = ProviderRequirements{Type: fqn, VersionConstraints: []VersionConstraint{reqd.Requirement}} - } -} - -func (v *Variable) merge(ov *Variable) hcl.Diagnostics { - var diags hcl.Diagnostics - - if ov.DescriptionSet { - v.Description = ov.Description - v.DescriptionSet = ov.DescriptionSet - } - if ov.Default != cty.NilVal { - v.Default = ov.Default - } - if ov.Type != cty.NilType { - v.Type = ov.Type - } - if ov.ParsingMode != 0 { - v.ParsingMode = ov.ParsingMode - } - - // If the override file overrode type without default or vice-versa then - // it may have created an invalid situation, which we'll catch now by - // attempting to re-convert the value. - // - // Note that here we may be re-converting an already-converted base value - // from the base config. This will be a no-op if the type was not changed, - // but in particular might be user-observable in the edge case where the - // literal value in config could've been converted to the overridden type - // constraint but the converted value cannot. In practice, this situation - // should be rare since most of our conversions are interchangable. - if v.Default != cty.NilVal { - val, err := convert.Convert(v.Default, v.Type) - if err != nil { - // What exactly we'll say in the error message here depends on whether - // it was Default or Type that was overridden here. - switch { - case ov.Type != cty.NilType && ov.Default == cty.NilVal: - // If only the type was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), - Subject: &ov.DeclRange, - }) - case ov.Type == cty.NilType && ov.Default != cty.NilVal: - // Only the default was overridden - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), - Subject: &ov.DeclRange, - }) - } - } else { - v.Default = val - } - } - - return diags -} - -func (l *Local) merge(ol *Local) hcl.Diagnostics { - var diags hcl.Diagnostics - - // Since a local is just a single expression in configuration, the - // override definition entirely replaces the base definition, including - // the source range so that we'll send the user to the right place if - // there is an error. - l.Expr = ol.Expr - l.DeclRange = ol.DeclRange - - return diags -} - -func (o *Output) merge(oo *Output) hcl.Diagnostics { - var diags hcl.Diagnostics - - if oo.Description != "" { - o.Description = oo.Description - } - if oo.Expr != nil { - o.Expr = oo.Expr - } - if oo.SensitiveSet { - o.Sensitive = oo.Sensitive - o.SensitiveSet = oo.SensitiveSet - } - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. 
- if len(oo.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { - var diags hcl.Diagnostics - - if omc.SourceSet { - mc.SourceAddr = omc.SourceAddr - mc.SourceAddrRange = omc.SourceAddrRange - mc.SourceSet = omc.SourceSet - } - - if omc.Count != nil { - mc.Count = omc.Count - } - - if omc.ForEach != nil { - mc.ForEach = omc.ForEach - } - - if len(omc.Version.Required) != 0 { - mc.Version = omc.Version - } - - mc.Config = MergeBodies(mc.Config, omc.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(mc.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} - -func (r *Resource) merge(or *Resource) hcl.Diagnostics { - var diags hcl.Diagnostics - - if r.Mode != or.Mode { - // This is always a programming error, since managed and data resources - // are kept in separate maps in the configuration structures. - panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) - } - - if or.Count != nil { - r.Count = or.Count - } - if or.ForEach != nil { - r.ForEach = or.ForEach - } - if or.ProviderConfigRef != nil { - r.ProviderConfigRef = or.ProviderConfigRef - } - if r.Mode == addrs.ManagedResourceMode { - // or.Managed is always non-nil for managed resource mode - - if or.Managed.Connection != nil { - r.Managed.Connection = or.Managed.Connection - } - if or.Managed.CreateBeforeDestroySet { - r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy - r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet - } - if len(or.Managed.IgnoreChanges) != 0 { - r.Managed.IgnoreChanges = or.Managed.IgnoreChanges - } - if or.Managed.PreventDestroySet { - r.Managed.PreventDestroy = or.Managed.PreventDestroy - r.Managed.PreventDestroySet = or.Managed.PreventDestroySet - } - if len(or.Managed.Provisioners) != 0 { - r.Managed.Provisioners = or.Managed.Provisioners - } - } - - r.Config = MergeBodies(r.Config, or.Config) - - // We don't allow depends_on to be overridden because that is likely to - // cause confusing misbehavior. - if len(or.DependsOn) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported override", - Detail: "The depends_on argument may not be overridden.", - Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have - }) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go deleted file mode 100644 index 7b51eae8..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go +++ /dev/null @@ -1,143 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// MergeBodies creates a new HCL body that contains a combination of the -// given base and override bodies. Attributes and blocks defined in the -// override body take precedence over those of the same name defined in -// the base body. 
-// -// If any block of a particular type appears in "override" then it will -// replace _all_ of the blocks of the same type in "base" in the new -// body. -func MergeBodies(base, override hcl.Body) hcl.Body { - return mergeBody{ - Base: base, - Override: override, - } -} - -// mergeBody is a hcl.Body implementation that wraps a pair of other bodies -// and allows attributes and blocks within the override to take precedence -// over those defined in the base body. -// -// This is used to deal with dynamically-processed bodies in Module.mergeFile. -// It uses a shallow-only merging strategy where direct attributes defined -// in Override will override attributes of the same name in Base, while any -// blocks defined in Override will hide all blocks of the same type in Base. -// -// This cannot possibly "do the right thing" in all cases, because we don't -// have enough information about user intent. However, this behavior is intended -// to be reasonable for simple overriding use-cases. -type mergeBody struct { - Base hcl.Body - Override hcl.Body -} - -var _ hcl.Body = mergeBody{} - -func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, _, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - return content, diags -} - -func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - baseSchema := schemaWithDynamic(schema) - overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) - - baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) - diags = append(diags, cDiags...) - overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) - diags = append(diags, cDiags...) - - content := b.prepareContent(baseContent, overrideContent) - - remain := MergeBodies(baseRemain, overrideRemain) - - return content, remain, diags -} - -func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - } - - // For attributes we just assign from each map in turn and let the override - // map clobber any matching entries from base. - for k, a := range base.Attributes { - content.Attributes[k] = a - } - for k, a := range override.Attributes { - content.Attributes[k] = a - } - - // Things are a little more interesting for blocks because they arrive - // as a flat list. Our merging semantics call for us to suppress blocks - // from base if at least one block of the same type appears in override. - // We explicitly do not try to correlate and deeply merge nested blocks, - // since we don't have enough context here to infer user intent. 
- - overriddenBlockTypes := make(map[string]bool) - for _, block := range override.Blocks { - if block.Type == "dynamic" { - overriddenBlockTypes[block.Labels[0]] = true - continue - } - overriddenBlockTypes[block.Type] = true - } - for _, block := range base.Blocks { - // We skip over dynamic blocks whose type label is an overridden type - // but note that below we do still leave them as dynamic blocks in - // the result because expanding the dynamic blocks that are left is - // done much later during the core graph walks, where we can safely - // evaluate the expressions. - if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { - continue - } - if overriddenBlockTypes[block.Type] { - continue - } - content.Blocks = append(content.Blocks, block) - } - for _, block := range override.Blocks { - content.Blocks = append(content.Blocks, block) - } - - return content -} - -func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - var diags hcl.Diagnostics - ret := make(hcl.Attributes) - - baseAttrs, aDiags := b.Base.JustAttributes() - diags = append(diags, aDiags...) - overrideAttrs, aDiags := b.Override.JustAttributes() - diags = append(diags, aDiags...) - - for k, a := range baseAttrs { - ret[k] = a - } - for k, a := range overrideAttrs { - ret[k] = a - } - - return ret, diags -} - -func (b mergeBody) MissingItemRange() hcl.Range { - return b.Base.MissingItemRange() -} diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go deleted file mode 100644 index 128bd278..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/named_values.go +++ /dev/null @@ -1,574 +0,0 @@ -package configs - -import ( - "fmt" - "unicode" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/typeexpr" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/addrs" -) - -// A consistent detail message for all "not a valid identifier" diagnostics. -const badIdentifierDetail = "A name must start with a letter or underscore and may contain only letters, digits, underscores, and dashes." - -// Variable represents a "variable" block in a module or file. -type Variable struct { - Name string - Description string - Default cty.Value - Type cty.Type - ParsingMode VariableParsingMode - Validations []*VariableValidation - - DescriptionSet bool - - DeclRange hcl.Range -} - -func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { - v := &Variable{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - // Unless we're building an override, we'll set some defaults - // which we might override with attributes below. We leave these - // as zero-value in the override case so we can recognize whether - // or not they are set when we merge. - if !override { - v.Type = cty.DynamicPseudoType - v.ParsingMode = VariableParseLiteral - } - - content, diags := block.Body.Content(variableBlockSchema) - - if !hclsyntax.ValidIdentifier(v.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - // Don't allow declaration of variables that would conflict with the - // reserved attribute and block type names in a "module" block, since - // these won't be usable for child modules. 
- for _, attr := range moduleBlockSchema.Attributes { - if attr.Name == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), - Subject: &block.LabelRanges[0], - }) - } - } - for _, blockS := range moduleBlockSchema.Blocks { - if blockS.Type == v.Name { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable name", - Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), - Subject: &block.LabelRanges[0], - }) - } - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) - diags = append(diags, valDiags...) - v.DescriptionSet = true - } - - if attr, exists := content.Attributes["type"]; exists { - ty, parseMode, tyDiags := decodeVariableType(attr.Expr) - diags = append(diags, tyDiags...) - v.Type = ty - v.ParsingMode = parseMode - } - - if attr, exists := content.Attributes["default"]; exists { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - - // Convert the default to the expected type so we can catch invalid - // defaults early and allow later code to assume validity. - // Note that this depends on us having already processed any "type" - // attribute above. - // However, we can't do this if we're in an override file where - // the type might not be set; we'll catch that during merge. - if v.Type != cty.NilType { - var err error - val, err = convert.Convert(val, v.Type) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid default value for variable", - Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), - Subject: attr.Expr.Range().Ptr(), - }) - val = cty.DynamicVal - } - } - - v.Default = val - } - - for _, block := range content.Blocks { - switch block.Type { - - case "validation": - vv, moreDiags := decodeVariableValidationBlock(v.Name, block, override) - diags = append(diags, moreDiags...) - v.Validations = append(v.Validations, vv) - - default: - // The above cases should be exhaustive for all block types - // defined in variableBlockSchema - panic(fmt.Sprintf("unhandled block type %q", block.Type)) - } - } - - return v, diags -} - -func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { - if exprIsNativeQuotedString(expr) { - // Here we're accepting the pre-0.12 form of variable type argument where - // the string values "string", "list" and "map" are accepted has a hint - // about the type used primarily for deciding how to parse values - // given on the command line and in environment variables. - // Only the native syntax ends up in this codepath; we handle the - // JSON syntax (which is, of course, quoted even in the new format) - // in the normal codepath below. - val, diags := expr.Value(nil) - if diags.HasErrors() { - return cty.DynamicPseudoType, VariableParseHCL, diags - } - str := val.AsString() - switch str { - case "string": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted type constraints are deprecated", - Detail: "Terraform 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of Terraform. 
To silence this warning, remove the quotes around \"string\".", - Subject: expr.Range().Ptr(), - }) - return cty.String, VariableParseLiteral, diags - case "list": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted type constraints are deprecated", - Detail: "Terraform 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of Terraform. To silence this warning, remove the quotes around \"list\" and write list(string) instead to explicitly indicate that the list elements are strings.", - Subject: expr.Range().Ptr(), - }) - return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags - case "map": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Quoted type constraints are deprecated", - Detail: "Terraform 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of Terraform. To silence this warning, remove the quotes around \"map\" and write map(string) instead to explicitly indicate that the map elements are strings.", - Subject: expr.Range().Ptr(), - }) - return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags - default: - return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{ - Severity: hcl.DiagError, - Summary: "Invalid legacy variable type hint", - Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, - Subject: expr.Range().Ptr(), - }} - } - } - - // First we'll deal with some shorthand forms that the HCL-level type - // expression parser doesn't include. These both emulate pre-0.12 behavior - // of allowing a list or map of any element type as long as all of the - // elements are consistent. This is the same as list(any) or map(any). - switch hcl.ExprAsKeyword(expr) { - case "list": - return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil - case "map": - return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil - } - - ty, diags := typeexpr.TypeConstraint(expr) - if diags.HasErrors() { - return cty.DynamicPseudoType, VariableParseHCL, diags - } - - switch { - case ty.IsPrimitiveType(): - // Primitive types use literal parsing. - return ty, VariableParseLiteral, diags - default: - // Everything else uses HCL parsing - return ty, VariableParseHCL, diags - } -} - -// Required returns true if this variable is required to be set by the caller, -// or false if there is a default value that will be used when it isn't set. -func (v *Variable) Required() bool { - return v.Default == cty.NilVal -} - -// VariableParsingMode defines how values of a particular variable given by -// text-only mechanisms (command line arguments and environment variables) -// should be parsed to produce the final value. -type VariableParsingMode rune - -// VariableParseLiteral is a variable parsing mode that just takes the given -// string directly as a cty.String value. -const VariableParseLiteral VariableParsingMode = 'L' - -// VariableParseHCL is a variable parsing mode that attempts to parse the given -// string as an HCL expression and returns the result. -const VariableParseHCL VariableParsingMode = 'H' - -// Parse uses the receiving parsing mode to process the given variable value -// string, returning the result along with any diagnostics. 
-// -// A VariableParsingMode does not know the expected type of the corresponding -// variable, so it's the caller's responsibility to attempt to convert the -// result to the appropriate type and return to the user any diagnostics that -// conversion may produce. -// -// The given name is used to create a synthetic filename in case any diagnostics -// must be generated about the given string value. This should be the name -// of the root module variable whose value will be populated from the given -// string. -// -// If the returned diagnostics has errors, the returned value may not be -// valid. -func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { - switch m { - case VariableParseLiteral: - return cty.StringVal(value), nil - case VariableParseHCL: - fakeFilename := fmt.Sprintf("<value for var.%s>", name) - expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - val, valDiags := expr.Value(nil) - diags = append(diags, valDiags...) - return val, diags - default: - // Should never happen - panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) - } -} - -// VariableValidation represents a configuration-defined validation rule -// for a particular input variable, given as a "validation" block inside -// a "variable" block. -type VariableValidation struct { - // Condition is an expression that refers to the variable being tested - // and contains no other references. The expression must return true - // to indicate that the value is valid or false to indicate that it is - // invalid. If the expression produces an error, that's considered a bug - // in the module defining the validation rule, not an error in the caller. - Condition hcl.Expression - - // ErrorMessage is one or more full sentences, which would need to be in - // English for consistency with the rest of the error message output but - // can in practice be in any language as long as it ends with a period. - // The message should describe what is required for the condition to return - // true in a way that would make sense to a caller of the module. - ErrorMessage string - - DeclRange hcl.Range -} - -func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*VariableValidation, hcl.Diagnostics) { - var diags hcl.Diagnostics - vv := &VariableValidation{ - DeclRange: block.DefRange, - } - - if override { - // For now we'll just forbid overriding validation blocks, to simplify - // the initial design. If we can find a clear use-case for overriding - // validations in override files and there's a way to define it that - // isn't confusing then we could relax this. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Can't override variable validation rules", - Detail: "Variable \"validation\" blocks cannot be used in override files.", - Subject: vv.DeclRange.Ptr(), - }) - return vv, diags - } - - content, moreDiags := block.Body.Content(variableValidationBlockSchema) - diags = append(diags, moreDiags...) - - if attr, exists := content.Attributes["condition"]; exists { - vv.Condition = attr.Expr - - // The validation condition can only refer to the variable itself, - // to ensure that the variable declaration can't create additional - // edges in the dependency graph.
- goodRefs := 0 - for _, traversal := range vv.Condition.Variables() { - ref, moreDiags := addrs.ParseRef(traversal) - if !moreDiags.HasErrors() { - if addr, ok := ref.Subject.(addrs.InputVariable); ok { - if addr.Name == varName { - goodRefs++ - continue // Reference is valid - } - } - } - // If we fall out here then the reference is invalid. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid reference in variable validation", - Detail: fmt.Sprintf("The condition for variable %q can only refer to the variable itself, using var.%s.", varName, varName), - Subject: traversal.SourceRange().Ptr(), - }) - } - if goodRefs < 1 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid variable validation condition", - Detail: fmt.Sprintf("The condition for variable %q must refer to var.%s in order to test incoming values.", varName, varName), - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - if attr, exists := content.Attributes["error_message"]; exists { - moreDiags := gohcl.DecodeExpression(attr.Expr, nil, &vv.ErrorMessage) - diags = append(diags, moreDiags...) - if !moreDiags.HasErrors() { - const errSummary = "Invalid validation error message" - switch { - case vv.ErrorMessage == "": - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "An empty string is not a valid nor useful error message.", - Subject: attr.Expr.Range().Ptr(), - }) - case !looksLikeSentences(vv.ErrorMessage): - // Because we're going to include this string verbatim as part - // of a bigger error message written in our usual style in - // English, we'll require the given error message to conform - // to that. We might relax this in future if e.g. we start - // presenting these error messages in a different way, or if - // Terraform starts supporting producing error messages in - // other human languages, etc. - // For pragmatism we also allow sentences ending with - // exclamation points, but we don't mention it explicitly here - // because that's not really consistent with the Terraform UI - // writing style. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "Validation error message must be at least one full English sentence starting with an uppercase letter and ending with a period or question mark.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - } - - return vv, diags -} - -// looksLikeSentence is a simple heuristic that encourages writing error -// messages that will be presentable when included as part of a larger -// Terraform error diagnostic whose other text is written in the Terraform -// UI writing style. -// -// This is intentionally not a very strong validation since we're assuming -// that module authors want to write good messages and might just need a nudge -// about Terraform's specific style, rather than that they are going to try -// to work around these rules to write a lower-quality message. -func looksLikeSentences(s string) bool { - if len(s) < 1 { - return false - } - runes := []rune(s) // HCL guarantees that all strings are valid UTF-8 - first := runes[0] - last := runes[len(s)-1] - - // If the first rune is a letter then it must be an uppercase letter. 
- // (This will only see the first rune in a multi-rune combining sequence, - // but the first rune is generally the letter if any are, and if not then - // we'll just ignore it because we're primarily expecting English messages - // right now anyway, for consistency with all of Terraform's other output.) - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - return false - } - - // The string must be at least one full sentence, which implies having - // sentence-ending punctuation. - // (This assumes that if a sentence ends with quotes then the period - // will be outside the quotes, which is consistent with Terraform's UI - // writing style.) - return last == '.' || last == '?' || last == '!' -} - -// Output represents an "output" block in a module or file. -type Output struct { - Name string - Description string - Expr hcl.Expression - DependsOn []hcl.Traversal - Sensitive bool - - DescriptionSet bool - SensitiveSet bool - - DeclRange hcl.Range -} - -func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { - o := &Output{ - Name: block.Labels[0], - DeclRange: block.DefRange, - } - - schema := outputBlockSchema - if override { - schema = schemaForOverrides(schema) - } - - content, diags := block.Body.Content(schema) - - if !hclsyntax.ValidIdentifier(o.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid output name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - - if attr, exists := content.Attributes["description"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) - diags = append(diags, valDiags...) - o.DescriptionSet = true - } - - if attr, exists := content.Attributes["value"]; exists { - o.Expr = attr.Expr - } - - if attr, exists := content.Attributes["sensitive"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) - diags = append(diags, valDiags...) - o.SensitiveSet = true - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - o.DependsOn = append(o.DependsOn, deps...) - } - - return o, diags -} - -// Local represents a single entry from a "locals" block in a module or file. -// The "locals" block itself is not represented, because it serves only to -// provide context for us to interpret its contents. -type Local struct { - Name string - Expr hcl.Expression - - DeclRange hcl.Range -} - -func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - if len(attrs) == 0 { - return nil, diags - } - - locals := make([]*Local, 0, len(attrs)) - for name, attr := range attrs { - if !hclsyntax.ValidIdentifier(name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid local value name", - Detail: badIdentifierDetail, - Subject: &attr.NameRange, - }) - } - - locals = append(locals, &Local{ - Name: name, - Expr: attr.Expr, - DeclRange: attr.Range, - }) - } - return locals, diags -} - -// Addr returns the address of the local value declared by the receiver, -// relative to its containing module. 
-func (l *Local) Addr() addrs.LocalValue { - return addrs.LocalValue{ - Name: l.Name, - } -} - -var variableBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "default", - }, - { - Name: "type", - }, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "validation", - }, - }, -} - -var variableValidationBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "condition", - Required: true, - }, - { - Name: "error_message", - Required: true, - }, - }, -} - -var outputBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "description", - }, - { - Name: "value", - Required: true, - }, - { - Name: "depends_on", - }, - { - Name: "sensitive", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go deleted file mode 100644 index 2a621b57..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser.go +++ /dev/null @@ -1,100 +0,0 @@ -package configs - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclparse" - "github.com/spf13/afero" -) - -// Parser is the main interface to read configuration files and other related -// files from disk. -// -// It retains a cache of all files that are loaded so that they can be used -// to create source code snippets in diagnostics, etc. -type Parser struct { - fs afero.Afero - p *hclparse.Parser -} - -// NewParser creates and returns a new Parser that reads files from the given -// filesystem. If a nil filesystem is passed then the system's "real" filesystem -// will be used, via afero.OsFs. -func NewParser(fs afero.Fs) *Parser { - if fs == nil { - fs = afero.OsFs{} - } - - return &Parser{ - fs: afero.Afero{Fs: fs}, - p: hclparse.NewParser(), - } -} - -// LoadHCLFile is a low-level method that reads the file at the given path, -// parses it, and returns the hcl.Body representing its root. In many cases -// it is better to use one of the other Load*File methods on this type, -// which additionally decode the root body in some way and return a higher-level -// construct. -// -// If the file cannot be read at all -- e.g. because it does not exist -- then -// this method will return a nil body and error diagnostics. In this case -// callers may wish to ignore the provided error diagnostics and produce -// a more context-sensitive error instead. -// -// The file will be parsed using the HCL native syntax unless the filename -// ends with ".json", in which case the HCL JSON syntax will be used. -func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { - src, err := p.fs.ReadFile(path) - - if err != nil { - return nil, hcl.Diagnostics{ - { - Severity: hcl.DiagError, - Summary: "Failed to read file", - Detail: fmt.Sprintf("The file %q could not be read.", path), - }, - } - } - - var file *hcl.File - var diags hcl.Diagnostics - switch { - case strings.HasSuffix(path, ".json"): - file, diags = p.p.ParseJSON(src, path) - default: - file, diags = p.p.ParseHCL(src, path) - } - - // If the returned file or body is nil, then we'll return a non-nil empty - // body so we'll meet our contract that nil means an error reading the file. 
- if file == nil || file.Body == nil { - return hcl.EmptyBody(), diags - } - - return file.Body, diags -} - -// Sources returns a map of the cached source buffers for all files that -// have been loaded through this parser, with source filenames (as requested -// when each file was opened) as the keys. -func (p *Parser) Sources() map[string][]byte { - return p.p.Sources() -} - -// ForceFileSource artificially adds source code to the cache of file sources, -// as if it had been loaded from the given filename. -// -// This should be used only in special situations where configuration is loaded -// some other way. Most callers should load configuration via methods of -// Parser, which will update the sources cache automatically. -func (p *Parser) ForceFileSource(filename string, src []byte) { - // We'll make a synthetic hcl.File here just so we can reuse the - // existing cache. - p.p.AddFile(filename, &hcl.File{ - Body: hcl.EmptyBody(), - Bytes: src, - }) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go deleted file mode 100644 index e8f94721..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser_config.go +++ /dev/null @@ -1,264 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigFile reads the file at the given path and parses it as a config -// file. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil *File will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, false) -} - -// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes -// certain required attribute constraints in order to interpret the given -// file as an overrides file. -func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { - return p.loadConfigFile(path, true) -} - -func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { - - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - file := &File{} - - var reqDiags hcl.Diagnostics - file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) - diags = append(diags, reqDiags...) - - // We'll load the experiments first because other decoding logic in the - // loop below might depend on these experiments. - var expDiags hcl.Diagnostics - file.ActiveExperiments, expDiags = sniffActiveExperiments(body) - diags = append(diags, expDiags...) - - content, contentDiags := body.Content(configFileSchema) - diags = append(diags, contentDiags...) - - for _, block := range content.Blocks { - switch block.Type { - - case "terraform": - content, contentDiags := block.Body.Content(terraformBlockSchema) - diags = append(diags, contentDiags...) - - // We ignore the "terraform_version" and "experiments" attributes - // here because sniffCoreVersionRequirements and - // sniffActiveExperiments already dealt with those above. 
- - for _, innerBlock := range content.Blocks { - switch innerBlock.Type { - - case "backend": - backendCfg, cfgDiags := decodeBackendBlock(innerBlock) - diags = append(diags, cfgDiags...) - if backendCfg != nil { - file.Backends = append(file.Backends, backendCfg) - } - - case "required_providers": - reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) - diags = append(diags, reqsDiags...) - file.RequiredProviders = append(file.RequiredProviders, reqs...) - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - case "provider": - cfg, cfgDiags := decodeProviderBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ProviderConfigs = append(file.ProviderConfigs, cfg) - } - - case "variable": - cfg, cfgDiags := decodeVariableBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Variables = append(file.Variables, cfg) - } - - case "locals": - defs, defsDiags := decodeLocalsBlock(block) - diags = append(diags, defsDiags...) - file.Locals = append(file.Locals, defs...) - - case "output": - cfg, cfgDiags := decodeOutputBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.Outputs = append(file.Outputs, cfg) - } - - case "module": - cfg, cfgDiags := decodeModuleBlock(block, override) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ModuleCalls = append(file.ModuleCalls, cfg) - } - - case "resource": - cfg, cfgDiags := decodeResourceBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.ManagedResources = append(file.ManagedResources, cfg) - } - - case "data": - cfg, cfgDiags := decodeDataBlock(block) - diags = append(diags, cfgDiags...) - if cfg != nil { - file.DataResources = append(file.DataResources, cfg) - } - - default: - // Should never happen because the above cases should be exhaustive - // for all block type names in our schema. - continue - - } - } - - return file, diags -} - -// sniffCoreVersionRequirements does minimal parsing of the given body for -// "terraform" blocks with "required_version" attributes, returning the -// requirements found. -// -// This is intended to maximize the chance that we'll be able to read the -// requirements (syntax errors notwithstanding) even if the config file contains -// constructs that might've been added in future Terraform versions -// -// This is a "best effort" sort of method which will return constraints it is -// able to find, but may return no constraints at all if the given body is -// so invalid that it cannot be decoded at all. -func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { - rootContent, _, diags := body.PartialContent(configFileTerraformBlockSniffRootSchema) - - var constraints []VersionConstraint - - for _, block := range rootContent.Blocks { - content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) - diags = append(diags, blockDiags...) - - attr, exists := content.Attributes["required_version"] - if !exists { - continue - } - - constraint, constraintDiags := decodeVersionConstraint(attr) - diags = append(diags, constraintDiags...) - if !constraintDiags.HasErrors() { - constraints = append(constraints, constraint) - } - } - - return constraints, diags -} - -// configFileSchema is the schema for the top-level of a config file. 
We use -// the low-level HCL API for this level so we can easily deal with each -// block type separately with its own decoding logic. -var configFileSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - { - Type: "provider", - LabelNames: []string{"name"}, - }, - { - Type: "variable", - LabelNames: []string{"name"}, - }, - { - Type: "locals", - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "module", - LabelNames: []string{"name"}, - }, - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - }, -} - -// terraformBlockSchema is the schema for a top-level "terraform" block in -// a configuration file. -var terraformBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - {Name: "required_version"}, - {Name: "experiments"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "backend", - LabelNames: []string{"type"}, - }, - { - Type: "required_providers", - }, - }, -} - -// configFileTerraformBlockSniffRootSchema is a schema for -// sniffCoreVersionRequirements and sniffActiveExperiments. -var configFileTerraformBlockSniffRootSchema = &hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "terraform", - }, - }, -} - -// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements -var configFileVersionSniffBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "required_version", - }, - }, -} - -// configFileExperimentsSniffBlockSchema is a schema for sniffActiveExperiments, -// to decode a single attribute from inside a "terraform" block. -var configFileExperimentsSniffBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "experiments", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go deleted file mode 100644 index 2923af93..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go +++ /dev/null @@ -1,163 +0,0 @@ -package configs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/hashicorp/hcl/v2" -) - -// LoadConfigDir reads the .tf and .tf.json files in the given directory -// as config files (using LoadConfigFile) and then combines these files into -// a single Module. -// -// If this method returns nil, that indicates that the given directory does not -// exist at all or could not be opened for some reason. Callers may wish to -// detect this case and ignore the returned diagnostics so that they can -// produce a more context-aware error message in that case. -// -// If this method returns a non-nil module while error diagnostics are returned -// then the module may be incomplete but can be used carefully for static -// analysis. -// -// This file does not consider a directory with no files to be an error, and -// will simply return an empty module in that case. Callers should first call -// Parser.IsConfigDir if they wish to recognize that situation. -// -// .tf files are parsed using the HCL native syntax while .tf.json files are -// parsed using the HCL JSON syntax. -func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { - primaryPaths, overridePaths, diags := p.dirFiles(path) - if diags.HasErrors() { - return nil, diags - } - - primary, fDiags := p.loadFiles(primaryPaths, false) - diags = append(diags, fDiags...) 
- override, fDiags := p.loadFiles(overridePaths, true) - diags = append(diags, fDiags...) - - mod, modDiags := NewModule(primary, override) - diags = append(diags, modDiags...) - - mod.SourceDir = path - - return mod, diags -} - -// ConfigDirFiles returns lists of the primary and override files configuration -// files in the given directory. -// -// If the given directory does not exist or cannot be read, error diagnostics -// are returned. If errors are returned, the resulting lists may be incomplete. -func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - return p.dirFiles(dir) -} - -// IsConfigDir determines whether the given path refers to a directory that -// exists and contains at least one Terraform config file (with a .tf or -// .tf.json extension.) -func (p *Parser) IsConfigDir(path string) bool { - primaryPaths, overridePaths, _ := p.dirFiles(path) - return (len(primaryPaths) + len(overridePaths)) > 0 -} - -func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { - var files []*File - var diags hcl.Diagnostics - - for _, path := range paths { - var f *File - var fDiags hcl.Diagnostics - if override { - f, fDiags = p.LoadConfigFileOverride(path) - } else { - f, fDiags = p.LoadConfigFile(path) - } - diags = append(diags, fDiags...) - if f != nil { - files = append(files, f) - } - } - - return files, diags -} - -func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { - infos, err := p.fs.ReadDir(dir) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Failed to read module directory", - Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), - }) - return - } - - for _, info := range infos { - if info.IsDir() { - // We only care about files - continue - } - - name := info.Name() - ext := fileExt(name) - if ext == "" || IsIgnoredFile(name) { - continue - } - - baseName := name[:len(name)-len(ext)] // strip extension - isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") - - fullPath := filepath.Join(dir, name) - if isOverride { - override = append(override, fullPath) - } else { - primary = append(primary, fullPath) - } - } - - return -} - -// fileExt returns the Terraform configuration extension of the given -// path, or a blank string if it is not a recognized extension. -func fileExt(path string) string { - if strings.HasSuffix(path, ".tf") { - return ".tf" - } else if strings.HasSuffix(path, ".tf.json") { - return ".tf.json" - } else { - return "" - } -} - -// IsIgnoredFile returns true if the given filename (which must not have a -// directory path ahead of it) should be ignored as e.g. an editor swap file. -func IsIgnoredFile(name string) bool { - return strings.HasPrefix(name, ".") || // Unix-like hidden files - strings.HasSuffix(name, "~") || // vim - strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs -} - -// IsEmptyDir returns true if the given filesystem path contains no Terraform -// configuration files. -// -// Unlike the methods of the Parser type, this function always consults the -// real filesystem, and thus it isn't appropriate to use when working with -// configuration loaded from a plan file. 
-func IsEmptyDir(path string) (bool, error) { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - return true, nil - } - - p := NewParser(nil) - fs, os, diags := p.dirFiles(path) - if diags.HasErrors() { - return false, diags - } - - return len(fs) == 0 && len(os) == 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go deleted file mode 100644 index 10d98e5b..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/parser_values.go +++ /dev/null @@ -1,43 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" -) - -// LoadValuesFile reads the file at the given path and parses it as a "values -// file", which is an HCL config file whose top-level attributes are treated -// as arbitrary key.value pairs. -// -// If the file cannot be read -- for example, if it does not exist -- then -// a nil map will be returned along with error diagnostics. Callers may wish -// to disregard the returned diagnostics in this case and instead generate -// their own error message(s) with additional context. -// -// If the returned diagnostics has errors when a non-nil map is returned -// then the map may be incomplete but should be valid enough for careful -// static analysis. -// -// This method wraps LoadHCLFile, and so it inherits the syntax selection -// behaviors documented for that method. -func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { - body, diags := p.LoadHCLFile(path) - if body == nil { - return nil, diags - } - - vals := make(map[string]cty.Value) - attrs, attrDiags := body.JustAttributes() - diags = append(diags, attrDiags...) - if attrs == nil { - return vals, diags - } - - for name, attr := range attrs { - val, valDiags := attr.Expr.Value(nil) - diags = append(diags, valDiags...) - vals[name] = val - } - - return vals, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go deleted file mode 100644 index 6b57fc60..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provider.go +++ /dev/null @@ -1,209 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" -) - -// Provider represents a "provider" block in a module or file. A provider -// block is a provider configuration, and there can be zero or more -// configurations for each actual provider. -type Provider struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if no alias set - - Version VersionConstraint - - Config hcl.Body - - DeclRange hcl.Range -} - -func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { - var diags hcl.Diagnostics - - // Produce deprecation messages for any pre-0.12-style - // single-interpolation-only expressions. We do this up front here because - // then we can also catch instances inside special blocks like "connection", - // before PartialContent extracts them. - moreDiags := warnForDeprecatedInterpolationsInBody(block.Body) - diags = append(diags, moreDiags...) - - content, config, moreDiags := block.Body.PartialContent(providerBlockSchema) - diags = append(diags, moreDiags...) 
- - provider := &Provider{ - Name: block.Labels[0], - NameRange: block.LabelRanges[0], - Config: config, - DeclRange: block.DefRange, - } - - if attr, exists := content.Attributes["alias"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) - diags = append(diags, valDiags...) - provider.AliasRange = attr.Expr.Range().Ptr() - - if !hclsyntax.ValidIdentifier(provider.Alias) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration alias", - Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), - }) - } - } - - if attr, exists := content.Attributes["version"]; exists { - var versionDiags hcl.Diagnostics - provider.Version, versionDiags = decodeVersionConstraint(attr) - diags = append(diags, versionDiags...) - } - - // Reserved attribute names - for _, name := range []string{"count", "depends_on", "for_each", "source"} { - if attr, exists := content.Attributes[name]; exists { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved argument name in provider block", - Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), - Subject: &attr.NameRange, - }) - } - } - - // Reserved block types (all of them) - for _, block := range content.Blocks { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provider block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - - return provider, diags -} - -// Addr returns the address of the receiving provider configuration, relative -// to its containing module. -func (p *Provider) Addr() addrs.ProviderConfig { - return addrs.ProviderConfig{ - Type: addrs.NewLegacyProvider(p.Name), - Alias: p.Alias, - } -} - -func (p *Provider) moduleUniqueKey() string { - if p.Alias != "" { - return fmt.Sprintf("%s.%s", p.Name, p.Alias) - } - return p.Name -} - -// ParseProviderConfigCompact parses the given absolute traversal as a relative -// provider address in compact form. The following are examples of traversals -// that can be successfully parsed as compact relative provider configuration -// addresses: -// -// aws -// aws.foo -// -// This function will panic if given a relative traversal. -// -// If the returned diagnostics contains errors then the result value is invalid -// and must not be used. -func ParseProviderConfigCompact(traversal hcl.Traversal) (addrs.ProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := addrs.ProviderConfig{ - Type: addrs.NewLegacyProvider(traversal.RootName()), - } - - if len(traversal) < 2 { - // Just a type name, then. 
- return ret, diags - } - - aliasStep := traversal[1] - switch ts := aliasStep.(type) { - case hcl.TraverseAttr: - ret.Alias = ts.Name - return ret, diags - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", - Subject: aliasStep.SourceRange().Ptr(), - }) - } - - if len(traversal) > 2 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration address", - Detail: "Extraneous extra operators after provider configuration address.", - Subject: traversal[2:].SourceRange().Ptr(), - }) - } - - return ret, diags -} - -// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact -// that takes a string and parses it with the HCL native syntax traversal parser -// before interpreting it. -// -// This should be used only in specialized situations since it will cause the -// created references to not have any meaningful source location information. -// If a reference string is coming from a source that should be identified in -// error messages then the caller should instead parse it directly using a -// suitable function from the HCL API and pass the traversal itself to -// ParseProviderConfigCompact. -// -// Error diagnostics are returned if either the parsing fails or the analysis -// of the traversal fails. There is no way for the caller to distinguish the -// two kinds of diagnostics programmatically. If error diagnostics are returned -// then the returned address is invalid. -func ParseProviderConfigCompactStr(str string) (addrs.ProviderConfig, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(parseDiags) - if parseDiags.HasErrors() { - return addrs.ProviderConfig{}, diags - } - - addr, addrDiags := ParseProviderConfigCompact(traversal) - diags = diags.Append(addrDiags) - return addr, diags -} - -var providerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "alias", - }, - { - Name: "version", - }, - - // Attribute names reserved for future expansion. - {Name: "count"}, - {Name: "depends_on"}, - {Name: "for_each"}, - {Name: "source"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - // _All_ of these are reserved for future expansion. - {Type: "lifecycle"}, - {Type: "locals"}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider_requirements.go b/vendor/github.com/hashicorp/terraform/configs/provider_requirements.go deleted file mode 100644 index 3709ddda..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provider_requirements.go +++ /dev/null @@ -1,92 +0,0 @@ -package configs - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" -) - -// RequiredProvider represents a declaration of a dependency on a particular -// provider version without actually configuring that provider. This is used in -// child modules that expect a provider to be passed in from their parent. -// -// TODO: "Source" is a placeholder for an attribute that is not yet supported. -type RequiredProvider struct { - Name string - Source string // TODO - Requirement VersionConstraint -} - -// ProviderRequirements represents merged provider version constraints. 
-// VersionConstraints come from terraform.require_providers blocks and provider -// blocks. -type ProviderRequirements struct { - Type addrs.Provider - VersionConstraints []VersionConstraint -} - -func decodeRequiredProvidersBlock(block *hcl.Block) ([]*RequiredProvider, hcl.Diagnostics) { - attrs, diags := block.Body.JustAttributes() - var reqs []*RequiredProvider - for name, attr := range attrs { - expr, err := attr.Expr.Value(nil) - if err != nil { - diags = append(diags, err...) - } - - rp := &RequiredProvider{ - Name: name, - } - - switch { - case expr.Type().IsPrimitiveType(): - vc, reqDiags := decodeVersionConstraint(attr) - diags = append(diags, reqDiags...) - rp.Requirement = vc - - case expr.Type().IsObjectType(): - if expr.Type().HasAttribute("version") { - vc := VersionConstraint{ - DeclRange: attr.Range, - } - constraintStr := expr.GetAttr("version").AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", - Subject: attr.Expr.Range().Ptr(), - }) - } else { - vc.Required = constraints - rp.Requirement = vc - } - } - if expr.Type().HasAttribute("source") { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Provider source not supported in Terraform v0.12", - Detail: fmt.Sprintf("A source was declared for provider %s. Terraform v0.12 does not support the provider source attribute. It will be ignored.", name), - Subject: attr.Expr.Range().Ptr(), - }) - } - default: - // should not happen - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider_requirements syntax", - Detail: "provider_requirements entries must be strings or objects.", - Subject: attr.Expr.Range().Ptr(), - }) - reqs = append(reqs, &RequiredProvider{Name: name}) - return reqs, diags - } - reqs = append(reqs, rp) - } - return reqs, diags -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go deleted file mode 100644 index 6f380860..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provisioner.go +++ /dev/null @@ -1,204 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" -) - -// Provisioner represents a "provisioner" block when used within a -// "resource" block in a module or file. -type Provisioner struct { - Type string - Config hcl.Body - Connection *Connection - When ProvisionerWhen - OnFailure ProvisionerOnFailure - - DeclRange hcl.Range - TypeRange hcl.Range -} - -func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { - pv := &Provisioner{ - Type: block.Labels[0], - TypeRange: block.LabelRanges[0], - DeclRange: block.DefRange, - When: ProvisionerWhenCreate, - OnFailure: ProvisionerOnFailureFail, - } - - content, config, diags := block.Body.PartialContent(provisionerBlockSchema) - pv.Config = config - - if attr, exists := content.Attributes["when"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) 
- - switch hcl.ExprAsKeyword(expr) { - case "create": - pv.When = ProvisionerWhenCreate - case "destroy": - pv.When = ProvisionerWhenDestroy - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"when\" keyword", - Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", - Subject: expr.Range().Ptr(), - }) - } - } - - // destroy provisioners can only refer to self - if pv.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(config)...) - } - - if attr, exists := content.Attributes["on_failure"]; exists { - expr, shimDiags := shimTraversalInString(attr.Expr, true) - diags = append(diags, shimDiags...) - - switch hcl.ExprAsKeyword(expr) { - case "continue": - pv.OnFailure = ProvisionerOnFailureContinue - case "fail": - pv.OnFailure = ProvisionerOnFailureFail - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid \"on_failure\" keyword", - Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - // destroy provisioners can only refer to self - if pv.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(block.Body)...) - } - - pv.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in provisioner block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return pv, diags -} - -func onlySelfRefs(body hcl.Body) hcl.Diagnostics { - var diags hcl.Diagnostics - - // Provisioners currently do not use any blocks in their configuration. - // Blocks are likely to remain solely for meta parameters, but in the case - // that blocks are supported for provisioners, we will want to extend this - // to find variables in nested blocks. 
- attrs, _ := body.JustAttributes() - for _, attr := range attrs { - for _, v := range attr.Expr.Variables() { - valid := false - switch v.RootName() { - case "self", "path", "terraform": - valid = true - case "count": - // count must use "index" - if len(v) == 2 { - if t, ok := v[1].(hcl.TraverseAttr); ok && t.Name == "index" { - valid = true - } - } - - case "each": - if len(v) == 2 { - if t, ok := v[1].(hcl.TraverseAttr); ok && t.Name == "key" { - valid = true - } - } - } - - if !valid { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "External references from destroy provisioners are deprecated", - Detail: "Destroy-time provisioners and their connection configurations may only " + - "reference attributes of the related resource, via 'self', 'count.index', " + - "or 'each.key'.\n\nReferences to other resources during the destroy phase " + - "can cause dependency cycles and interact poorly with create_before_destroy.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - } - return diags -} - -// Connection represents a "connection" block when used within either a -// "resource" or "provisioner" block in a module or file. -type Connection struct { - Config hcl.Body - - DeclRange hcl.Range -} - -// ProvisionerWhen is an enum for valid values for when to run provisioners. -type ProvisionerWhen int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen - -const ( - ProvisionerWhenInvalid ProvisionerWhen = iota - ProvisionerWhenCreate - ProvisionerWhenDestroy -) - -// ProvisionerOnFailure is an enum for valid values for on_failure options -// for provisioners. -type ProvisionerOnFailure int - -//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure - -const ( - ProvisionerOnFailureInvalid ProvisionerOnFailure = iota - ProvisionerOnFailureContinue - ProvisionerOnFailureFail -) - -var provisionerBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - {Name: "when"}, - {Name: "on_failure"}, - }, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "connection"}, - {Type: "lifecycle"}, // reserved for future use - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go deleted file mode 100644 index 7ff5a6e0..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ProvisionerOnFailureInvalid-0] - _ = x[ProvisionerOnFailureContinue-1] - _ = x[ProvisionerOnFailureFail-2] -} - -const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" - -var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} - -func (i ProvisionerOnFailure) String() string { - if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { - return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go deleted file mode 100644 index 9f21b3ac..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ProvisionerWhenInvalid-0] - _ = x[ProvisionerWhenCreate-1] - _ = x[ProvisionerWhenDestroy-2] -} - -const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" - -var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} - -func (i ProvisionerWhen) String() string { - if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { - return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go deleted file mode 100644 index 7ca73ff1..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/resource.go +++ /dev/null @@ -1,510 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" -) - -// Resource represents a "resource" or "data" block in a module or file. -type Resource struct { - Mode addrs.ResourceMode - Name string - Type string - Config hcl.Body - Count hcl.Expression - ForEach hcl.Expression - - ProviderConfigRef *ProviderConfigRef - - DependsOn []hcl.Traversal - - // Managed is populated only for Mode = addrs.ManagedResourceMode, - // containing the additional fields that apply to managed resources. - // For all other resource modes, this field is nil. - Managed *ManagedResource - - DeclRange hcl.Range - TypeRange hcl.Range -} - -// ManagedResource represents a "resource" block in a module or file. -type ManagedResource struct { - Connection *Connection - Provisioners []*Provisioner - - CreateBeforeDestroy bool - PreventDestroy bool - IgnoreChanges []hcl.Traversal - IgnoreAllChanges bool - - CreateBeforeDestroySet bool - PreventDestroySet bool -} - -func (r *Resource) moduleUniqueKey() string { - return r.Addr().String() -} - -// Addr returns a resource address for the receiver that is relative to the -// resource's containing module. 
-func (r *Resource) Addr() addrs.Resource { - return addrs.Resource{ - Mode: r.Mode, - Type: r.Type, - Name: r.Name, - } -} - -// ProviderConfigAddr returns the address for the provider configuration -// that should be used for this resource. This function implements the -// default behavior of extracting the type from the resource type name if -// an explicit "provider" argument was not provided. -func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { - if r.ProviderConfigRef == nil { - return r.Addr().DefaultProviderConfig() - } - - return addrs.ProviderConfig{ - Type: addrs.NewLegacyProvider(r.ProviderConfigRef.Name), - Alias: r.ProviderConfigRef.Alias, - } -} - -func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - var diags hcl.Diagnostics - r := &Resource{ - Mode: addrs.ManagedResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - Managed: &ManagedResource{}, - } - - // Produce deprecation messages for any pre-0.12-style - // single-interpolation-only expressions. We do this up front here because - // then we can also catch instances inside special blocks like "connection", - // before PartialContent extracts them. - moreDiags := warnForDeprecatedInterpolationsInBody(block.Body) - diags = append(diags, moreDiags...) - - content, remain, moreDiags := block.Body.PartialContent(resourceBlockSchema) - diags = append(diags, moreDiags...) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same resource block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - var seenLifecycle *hcl.Block - var seenConnection *hcl.Block - for _, block := range content.Blocks { - switch block.Type { - case "lifecycle": - if seenLifecycle != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate lifecycle block", - Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenLifecycle = block - - lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) - diags = append(diags, lcDiags...) 
- - if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) - diags = append(diags, valDiags...) - r.Managed.CreateBeforeDestroySet = true - } - - if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { - valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) - diags = append(diags, valDiags...) - r.Managed.PreventDestroySet = true - } - - if attr, exists := lcContent.Attributes["ignore_changes"]; exists { - - // ignore_changes can either be a list of relative traversals - // or it can be just the keyword "all" to ignore changes to this - // resource entirely. - // ignore_changes = [ami, instance_type] - // ignore_changes = all - // We also allow two legacy forms for compatibility with earlier - // versions: - // ignore_changes = ["ami", "instance_type"] - // ignore_changes = ["*"] - - kw := hcl.ExprAsKeyword(attr.Expr) - - switch { - case kw == "all": - r.Managed.IgnoreAllChanges = true - default: - exprs, listDiags := hcl.ExprList(attr.Expr) - diags = append(diags, listDiags...) - - var ignoreAllRange hcl.Range - - for _, expr := range exprs { - - // our expr might be the literal string "*", which - // we accept as a deprecated way of saying "all". - if shimIsIgnoreChangesStar(expr) { - r.Managed.IgnoreAllChanges = true - ignoreAllRange = expr.Range() - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Deprecated ignore_changes wildcard", - Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", - Subject: attr.Expr.Range().Ptr(), - }) - continue - } - - expr, shimDiags := shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.RelTraversalForExpr(expr) - diags = append(diags, travDiags...) - if len(traversal) != 0 { - r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) - } - } - - if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid ignore_changes ruleset", - Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", - Subject: &ignoreAllRange, - Context: attr.Expr.Range().Ptr(), - }) - } - - } - - } - - case "connection": - if seenConnection != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Duplicate connection block", - Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), - Subject: &block.DefRange, - }) - continue - } - seenConnection = block - - r.Managed.Connection = &Connection{ - Config: block.Body, - DeclRange: block.DefRange, - } - - case "provisioner": - pv, pvDiags := decodeProvisionerBlock(block) - diags = append(diags, pvDiags...) - if pv != nil { - r.Managed.Provisioners = append(r.Managed.Provisioners, pv) - } - - default: - // Any other block types are ones we've reserved for future use, - // so they get a generic message. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in resource block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - // Now we can validate the connection block references if there are any destroy provisioners. - // TODO: should we eliminate standalone connection blocks? 
- if r.Managed.Connection != nil { - for _, p := range r.Managed.Provisioners { - if p.When == ProvisionerWhenDestroy { - diags = append(diags, onlySelfRefs(r.Managed.Connection.Config)...) - break - } - } - } - - return r, diags -} - -func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { - r := &Resource{ - Mode: addrs.DataResourceMode, - Type: block.Labels[0], - Name: block.Labels[1], - DeclRange: block.DefRange, - TypeRange: block.LabelRanges[0], - } - - content, remain, diags := block.Body.PartialContent(dataBlockSchema) - r.Config = remain - - if !hclsyntax.ValidIdentifier(r.Type) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[0], - }) - } - if !hclsyntax.ValidIdentifier(r.Name) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data resource name", - Detail: badIdentifierDetail, - Subject: &block.LabelRanges[1], - }) - } - - if attr, exists := content.Attributes["count"]; exists { - r.Count = attr.Expr - } - - if attr, exists := content.Attributes["for_each"]; exists { - r.ForEach = attr.Expr - // Cannot have count and for_each on the same data block - if r.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid combination of "count" and "for_each"`, - Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, - Subject: &attr.NameRange, - }) - } - } - - if attr, exists := content.Attributes["provider"]; exists { - var providerDiags hcl.Diagnostics - r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") - diags = append(diags, providerDiags...) - } - - if attr, exists := content.Attributes["depends_on"]; exists { - deps, depsDiags := decodeDependsOn(attr) - diags = append(diags, depsDiags...) - r.DependsOn = append(r.DependsOn, deps...) - } - - for _, block := range content.Blocks { - // All of the block types we accept are just reserved for future use, but some get a specialized error message. - switch block.Type { - case "lifecycle": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported lifecycle block", - Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", - Subject: &block.DefRange, - }) - default: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Reserved block type name in data block", - Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, - }) - } - } - - return r, diags -} - -type ProviderConfigRef struct { - Name string - NameRange hcl.Range - Alias string - AliasRange *hcl.Range // nil if alias not set -} - -func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { - var diags hcl.Diagnostics - - var shimDiags hcl.Diagnostics - expr, shimDiags = shimTraversalInString(expr, false) - diags = append(diags, shimDiags...) - - traversal, travDiags := hcl.AbsTraversalForExpr(expr) - - // AbsTraversalForExpr produces only generic errors, so we'll discard - // the errors given and produce our own with extra context. If we didn't - // get any errors then we might still have warnings, though. - if !travDiags.HasErrors() { - diags = append(diags, travDiags...) 
- } - - if len(traversal) < 1 || len(traversal) > 2 { - // A provider reference was given as a string literal in the legacy - // configuration language and there are lots of examples out there - // showing that usage, so we'll sniff for that situation here and - // produce a specialized error message for it to help users find - // the new correct form. - if exprIsNativeQuotedString(expr) { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "A provider configuration reference must not be given in quotes.", - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), - Subject: expr.Range().Ptr(), - }) - return nil, diags - } - - ret := &ProviderConfigRef{ - Name: traversal.RootName(), - NameRange: traversal[0].SourceRange(), - } - - if len(traversal) > 1 { - aliasStep, ok := traversal[1].(hcl.TraverseAttr) - if !ok { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider configuration reference", - Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", - Subject: traversal[1].SourceRange().Ptr(), - }) - return ret, diags - } - - ret.Alias = aliasStep.Name - ret.AliasRange = aliasStep.SourceRange().Ptr() - } - - return ret, diags -} - -// Addr returns the provider config address corresponding to the receiving -// config reference. -// -// This is a trivial conversion, essentially just discarding the source -// location information and keeping just the addressing information. 
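// Illustrative sketch, not part of this patch: the exported ProviderConfigRef type
// above models references such as `provider = alks.nonprod` (a provider type name,
// optionally followed by a period and a configuration alias). Its String method,
// defined a little further down in resource.go, reassembles the "name.alias" form.
// Assumes the vendored import path shown in this diff.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	ref := &configs.ProviderConfigRef{Name: "alks", Alias: "nonprod"}
	fmt.Println(ref.String()) // prints: alks.nonprod
}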
-func (r *ProviderConfigRef) Addr() addrs.ProviderConfig { - return addrs.ProviderConfig{ - Type: addrs.NewLegacyProvider(r.Name), - Alias: r.Alias, - } -} - -func (r *ProviderConfigRef) String() string { - if r == nil { - return "" - } - if r.Alias != "" { - return fmt.Sprintf("%s.%s", r.Name, r.Alias) - } - return r.Name -} - -var commonResourceAttributes = []hcl.AttributeSchema{ - { - Name: "count", - }, - { - Name: "for_each", - }, - { - Name: "provider", - }, - { - Name: "depends_on", - }, -} - -var resourceBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "locals"}, // reserved for future use - {Type: "lifecycle"}, - {Type: "connection"}, - {Type: "provisioner", LabelNames: []string{"type"}}, - }, -} - -var dataBlockSchema = &hcl.BodySchema{ - Attributes: commonResourceAttributes, - Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, // reserved for future use - {Type: "locals"}, // reserved for future use - }, -} - -var resourceLifecycleBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "create_before_destroy", - }, - { - Name: "prevent_destroy", - }, - { - Name: "ignore_changes", - }, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go deleted file mode 100644 index cd914e5d..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/synth_body.go +++ /dev/null @@ -1,118 +0,0 @@ -package configs - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" -) - -// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes -// corresponding to the elements given in the values map. -// -// This is useful in situations where, for example, values provided on the -// command line can override values given in configuration, using MergeBodies. -// -// The given filename is used in case any diagnostics are returned. Since -// the created body is synthetic, it is likely that this will not be a "real" -// filename. For example, if from a command line argument it could be -// a representation of that argument's name, such as "-var=...". 
-func SynthBody(filename string, values map[string]cty.Value) hcl.Body { - return synthBody{ - Filename: filename, - Values: values, - } -} - -type synthBody struct { - Filename string - Values map[string]cty.Value -} - -func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - content, remain, diags := b.PartialContent(schema) - remainS := remain.(synthBody) - for name := range remainS.Values { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported attribute", - Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), - Subject: b.synthRange().Ptr(), - }) - } - return content, diags -} - -func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - var diags hcl.Diagnostics - content := &hcl.BodyContent{ - Attributes: make(hcl.Attributes), - MissingItemRange: b.synthRange(), - } - - remainValues := make(map[string]cty.Value) - for attrName, val := range b.Values { - remainValues[attrName] = val - } - - for _, attrS := range schema.Attributes { - delete(remainValues, attrS.Name) - val, defined := b.Values[attrS.Name] - if !defined { - if attrS.Required { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing required attribute", - Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), - Subject: b.synthRange().Ptr(), - }) - } - continue - } - content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) - } - - // We just ignore blocks altogether, because this body type never has - // nested blocks. - - remain := synthBody{ - Filename: b.Filename, - Values: remainValues, - } - - return content, remain, diags -} - -func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - ret := make(hcl.Attributes) - for name, val := range b.Values { - ret[name] = b.synthAttribute(name, val) - } - return ret, nil -} - -func (b synthBody) MissingItemRange() hcl.Range { - return b.synthRange() -} - -func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { - rng := b.synthRange() - return &hcl.Attribute{ - Name: name, - Expr: &hclsyntax.LiteralValueExpr{ - Val: val, - SrcRange: rng, - }, - NameRange: rng, - Range: rng, - } -} - -func (b synthBody) synthRange() hcl.Range { - return hcl.Range{ - Filename: b.Filename, - Start: hcl.Pos{Line: 1, Column: 1}, - End: hcl.Pos{Line: 1, Column: 1}, - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go deleted file mode 100644 index e135546f..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/util.go +++ /dev/null @@ -1,63 +0,0 @@ -package configs - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" -) - -// exprIsNativeQuotedString determines whether the given expression looks like -// it's a quoted string in the HCL native syntax. -// -// This should be used sparingly only for situations where our legacy HCL -// decoding would've expected a keyword or reference in quotes but our new -// decoding expects the keyword or reference to be provided directly as -// an identifier-based expression. -func exprIsNativeQuotedString(expr hcl.Expression) bool { - _, ok := expr.(*hclsyntax.TemplateExpr) - return ok -} - -// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is -// equivalent except that any required attributes are forced to not be required. 
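// Illustrative sketch, not part of this patch: SynthBody above builds an hcl.Body
// out of plain values, e.g. so command-line overrides can be merged with parsed
// configuration. Assumes the vendored import path shown in this diff; cty is the
// go-cty package already imported by synth_body.go.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	body := configs.SynthBody("-var", map[string]cty.Value{
		"region": cty.StringVal("us-east-1"),
	})

	// JustAttributes exposes every synthetic value as an attribute.
	attrs, diags := body.JustAttributes()
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := attrs["region"].Expr.Value(nil)
	fmt.Println(val.AsString()) // prints: us-east-1
}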
-// -// This is useful for dealing with "override" config files, which are allowed -// to omit things that they don't wish to override from the main configuration. -// -// The returned schema may have some pointers in common with the given schema, -// so neither the given schema nor the returned schema should be modified after -// using this function in order to avoid confusion. -// -// Overrides are rarely used, so it's recommended to just create the override -// schema on the fly only when it's needed, rather than storing it in a global -// variable as we tend to do for a primary schema. -func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} - -// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that -// is equivalent except that it accepts an additional block type "dynamic" with -// a single label, used to recognize usage of the HCL dynamic block extension. -func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: schema.Attributes, - Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), - } - - copy(ret.Blocks, schema.Blocks) - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: "dynamic", - LabelNames: []string{"type"}, - }) - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go deleted file mode 100644 index c02ad4b5..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go +++ /dev/null @@ -1,45 +0,0 @@ -package configs - -// VariableTypeHint is an enumeration used for the Variable.TypeHint field, -// which is an incompletely-specified type for the variable which is used -// as a hint for whether a value provided in an ambiguous context (on the -// command line or in an environment variable) should be taken literally as a -// string or parsed as an HCL expression to produce a data structure. -// -// The type hint is applied to runtime values as well, but since it does not -// accurately describe a precise type it is not fully-sufficient to infer -// the dynamic type of a value passed through a variable. -// -// These hints use inaccurate terminology for historical reasons. Full details -// are in the documentation for each constant in this enumeration, but in -// summary: -// -// TypeHintString requires a primitive type -// TypeHintList requires a type that could be converted to a tuple -// TypeHintMap requires a type that could be converted to an object -type VariableTypeHint rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint - -// TypeHintNone indicates the absence of a type hint. Values specified in -// ambiguous contexts will be treated as literal strings, as if TypeHintString -// were selected, but no runtime value checks will be applied. This is reasonable -// type hint for a module that is never intended to be used at the top-level -// of a configuration, since descendent modules never receive values from -// ambiguous contexts. 
-const TypeHintNone VariableTypeHint = 0 - -// TypeHintString spec indicates that a value provided in an ambiguous context -// should be treated as a literal string, and additionally requires that the -// runtime value for the variable is of a primitive type (string, number, bool). -const TypeHintString VariableTypeHint = 'S' - -// TypeHintList indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an tuple, list, or set type. -const TypeHintList VariableTypeHint = 'L' - -// TypeHintMap indicates that a value provided in an ambiguous context should -// be treated as an HCL expression, and additionally requires that the -// runtime value for the variable is of an object or map type. -const TypeHintMap VariableTypeHint = 'M' diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go deleted file mode 100644 index 2b50428c..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT. - -package configs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[TypeHintNone-0] - _ = x[TypeHintString-83] - _ = x[TypeHintList-76] - _ = x[TypeHintMap-77] -} - -const ( - _VariableTypeHint_name_0 = "TypeHintNone" - _VariableTypeHint_name_1 = "TypeHintListTypeHintMap" - _VariableTypeHint_name_2 = "TypeHintString" -) - -var ( - _VariableTypeHint_index_1 = [...]uint8{0, 12, 23} -) - -func (i VariableTypeHint) String() string { - switch { - case i == 0: - return _VariableTypeHint_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]] - case i == 83: - return _VariableTypeHint_name_2 - default: - return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go deleted file mode 100644 index 0f541dc7..00000000 --- a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go +++ /dev/null @@ -1,71 +0,0 @@ -package configs - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// VersionConstraint represents a version constraint on some resource -// (e.g. Terraform Core, a provider, a module, ...) that carries with it -// a source range so that a helpful diagnostic can be printed in the event -// that a particular constraint does not match. 
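// Illustrative sketch, not part of this patch: the generated stringer above gives
// VariableTypeHint values readable names, so they print cleanly in diagnostics.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	// fmt uses the generated String method for each constant.
	fmt.Println(configs.TypeHintNone, configs.TypeHintList, configs.TypeHintMap)
	// prints: TypeHintNone TypeHintList TypeHintMap
}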
-type VersionConstraint struct { - Required version.Constraints - DeclRange hcl.Range -} - -func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { - ret := VersionConstraint{ - DeclRange: attr.Range, - } - - val, diags := attr.Expr.Value(nil) - if diags.HasErrors() { - return ret, diags - } - var err error - val, err = convert.Convert(val, cty.String) - if err != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - if val.IsNull() { - // A null version constraint is strange, but we'll just treat it - // like an empty constraint set. - return ret, diags - } - - if !val.IsWhollyKnown() { - // If there is a syntax error, HCL sets the value of the given attribute - // to cty.DynamicVal. A diagnostic for the syntax error will already - // bubble up, so we will move forward gracefully here. - return ret, diags - } - - constraintStr := val.AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( - Subject: attr.Expr.Range().Ptr(), - }) - return ret, diags - } - - ret.Required = constraints - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go deleted file mode 100644 index 77c67eff..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ /dev/null @@ -1,301 +0,0 @@ -package dag - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/go-multierror" -) - -// AcyclicGraph is a specialization of Graph that cannot have cycles. With -// this property, we get the property of sane graph traversal. -type AcyclicGraph struct { - Graph -} - -// WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) tfdiags.Diagnostics - -// DepthWalkFunc is a walk function that also receives the current depth of the -// walk as an argument -type DepthWalkFunc func(Vertex, int) error - -func (g *AcyclicGraph) DirectedGraph() Grapher { - return g -} - -// Returns a Set that includes every Vertex yielded by walking down from the -// provided starting Vertex v. -func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.DownEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.DepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Returns a Set that includes every Vertex yielded by walking up from the -// provided starting Vertex v. -func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { - s := new(Set) - start := AsVertexList(g.UpEdges(v)) - memoFunc := func(v Vertex, d int) error { - s.Add(v) - return nil - } - - if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { - return nil, err - } - - return s, nil -} - -// Root returns the root of the DAG, or an error. 
-// -// Complexity: O(V) -func (g *AcyclicGraph) Root() (Vertex, error) { - roots := make([]Vertex, 0, 1) - for _, v := range g.Vertices() { - if g.UpEdges(v).Len() == 0 { - roots = append(roots, v) - } - } - - if len(roots) > 1 { - // TODO(mitchellh): make this error message a lot better - return nil, fmt.Errorf("multiple roots: %#v", roots) - } - - if len(roots) == 0 { - return nil, fmt.Errorf("no roots found") - } - - return roots[0], nil -} - -// TransitiveReduction performs the transitive reduction of graph g in place. -// The transitive reduction of a graph is a graph with as few edges as -// possible with the same reachability as the original graph. This means -// that if there are three nodes A => B => C, and A connects to both -// B and C, and B connects to C, then the transitive reduction is the -// same graph with only a single edge between A and B, and a single edge -// between B and C. -// -// The graph must be valid for this operation to behave properly. If -// Validate() returns an error, the behavior is undefined and the results -// will likely be unexpected. -// -// Complexity: O(V(V+E)), or asymptotically O(VE) -func (g *AcyclicGraph) TransitiveReduction() { - // For each vertex u in graph g, do a DFS starting from each vertex - // v such that the edge (u,v) exists (v is a direct descendant of u). - // - // For each v-prime reachable from v, remove the edge (u, v-prime). - defer g.debug.BeginOperation("TransitiveReduction", "").End("") - - for _, u := range g.Vertices() { - uTargets := g.DownEdges(u) - vs := AsVertexList(g.DownEdges(u)) - - g.depthFirstWalk(vs, false, func(v Vertex, d int) error { - shared := uTargets.Intersection(g.DownEdges(v)) - for _, vPrime := range AsVertexList(shared) { - g.RemoveEdge(BasicEdge(u, vPrime)) - } - - return nil - }) - } -} - -// Validate validates the DAG. A DAG is valid if it has a single root -// with no cycles. -func (g *AcyclicGraph) Validate() error { - if _, err := g.Root(); err != nil { - return err - } - - // Look for cycles of more than 1 component - var err error - cycles := g.Cycles() - if len(cycles) > 0 { - for _, cycle := range cycles { - cycleStr := make([]string, len(cycle)) - for j, vertex := range cycle { - cycleStr[j] = VertexName(vertex) - } - - err = multierror.Append(err, fmt.Errorf( - "Cycle: %s", strings.Join(cycleStr, ", "))) - } - } - - // Look for cycles to self - for _, e := range g.Edges() { - if e.Source() == e.Target() { - err = multierror.Append(err, fmt.Errorf( - "Self reference: %s", VertexName(e.Source()))) - } - } - - return err -} - -func (g *AcyclicGraph) Cycles() [][]Vertex { - var cycles [][]Vertex - for _, cycle := range StronglyConnected(&g.Graph) { - if len(cycle) > 1 { - cycles = append(cycles, cycle) - } - } - return cycles -} - -// Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. The resulting diagnostics -// contains problems from all graphs visited, in no particular order. 
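// Illustrative sketch, not part of this patch: TransitiveReduction above applied to
// the A => B => C example from its own comment. Graph.Add, Graph.Connect and
// BasicEdge belong to the same vendored dag package (they appear later in this diff).
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.AcyclicGraph
	g.Add("A")
	g.Add("B")
	g.Add("C")
	g.Connect(dag.BasicEdge("A", "B"))
	g.Connect(dag.BasicEdge("B", "C"))
	g.Connect(dag.BasicEdge("A", "C")) // redundant: C is already reachable via B

	g.TransitiveReduction() // removes A -> C; reachability is unchanged

	fmt.Print(g.String())
	// A
	//   B
	// B
	//   C
	// C
}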
-func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { - defer g.debug.BeginOperation(typeWalk, "").End("") - - w := &Walker{Callback: cb, Reverse: true} - w.Update(g) - return w.Wait() -} - -// simple convenience helper for converting a dag.Set to a []Vertex -func AsVertexList(s *Set) []Vertex { - rawList := s.List() - vertexList := make([]Vertex, len(rawList)) - for i, raw := range rawList { - vertexList[i] = raw.(Vertex) - } - return vertexList -} - -type vertexAtDepth struct { - Vertex Vertex - Depth int -} - -// depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. -func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - return g.depthFirstWalk(start, true, f) -} - -// This internal method provides the option of not sorting the vertices during -// the walk, which we use for the Transitive reduction. -// Some configurations can lead to fully-connected subgraphs, which makes our -// transitive reduction algorithm O(n^3). This is still passable for the size -// of our graphs, but the additional n^2 sort operations would make this -// uncomputable in a reasonable amount of time. -func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - // Visit targets of this in a consistent order. - targets := AsVertexList(g.DownEdges(current.Vertex)) - - if sorted { - sort.Sort(byVertexName(targets)) - } - - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - } - - return nil -} - -// reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from -// the vertices in start. -func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { - defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("") - - seen := make(map[Vertex]struct{}) - frontier := make([]*vertexAtDepth, len(start)) - for i, v := range start { - frontier[i] = &vertexAtDepth{ - Vertex: v, - Depth: 0, - } - } - for len(frontier) > 0 { - // Pop the current vertex - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check if we've seen this already and return... - if _, ok := seen[current.Vertex]; ok { - continue - } - seen[current.Vertex] = struct{}{} - - // Add next set of targets in a consistent order. 
- targets := AsVertexList(g.UpEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) - for _, t := range targets { - frontier = append(frontier, &vertexAtDepth{ - Vertex: t, - Depth: current.Depth + 1, - }) - } - - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - } - - return nil -} - -// byVertexName implements sort.Interface so a list of Vertices can be sorted -// consistently by their VertexName -type byVertexName []Vertex - -func (b byVertexName) Len() int { return len(b) } -func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byVertexName) Less(i, j int) bool { - return VertexName(b[i]) < VertexName(b[j]) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go deleted file mode 100644 index 7e6d2af3..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/dot.go +++ /dev/null @@ -1,282 +0,0 @@ -package dag - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// DotOpts are the options for generating a dot formatted Graph. -type DotOpts struct { - // Allows some nodes to decide to only show themselves when the user has - // requested the "verbose" graph. - Verbose bool - - // Highlight Cycles - DrawCycles bool - - // How many levels to expand modules as we draw - MaxDepth int - - // use this to keep the cluster_ naming convention from the previous dot writer - cluster bool -} - -// GraphNodeDotter can be implemented by a node to cause it to be included -// in the dot graph. The Dot method will be called which is expected to -// return a representation of this node. -type GraphNodeDotter interface { - // Dot is called to return the dot formatting for the node. - // The first parameter is the title of the node. - // The second parameter includes user-specified options that affect the dot - // graph. See GraphDotOpts below for details. - DotNode(string, *DotOpts) *DotNode -} - -// DotNode provides a structure for Vertices to return in order to specify their -// dot format. -type DotNode struct { - Name string - Attrs map[string]string -} - -// Returns the DOT representation of this Graph. 
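// Illustrative sketch, not part of this patch: DepthFirstWalk above visits every
// vertex reachable from the start set and hands the current depth to the callback.
// VertexName is defined later in this diff (graph.go).
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.AcyclicGraph
	for _, v := range []string{"root", "mid", "leaf"} {
		g.Add(v)
	}
	g.Connect(dag.BasicEdge("root", "mid"))
	g.Connect(dag.BasicEdge("mid", "leaf"))

	err := g.DepthFirstWalk([]dag.Vertex{"root"}, func(v dag.Vertex, depth int) error {
		fmt.Printf("%*s%s\n", depth*2, "", dag.VertexName(v))
		return nil
	})
	if err != nil {
		panic(err)
	}
	// prints root, mid, leaf with increasing indentation
}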
-func (g *marshalGraph) Dot(opts *DotOpts) []byte { - if opts == nil { - opts = &DotOpts{ - DrawCycles: true, - MaxDepth: -1, - Verbose: true, - } - } - - var w indentWriter - w.WriteString("digraph {\n") - w.Indent() - - // some dot defaults - w.WriteString(`compound = "true"` + "\n") - w.WriteString(`newrank = "true"` + "\n") - - // the top level graph is written as the first subgraph - w.WriteString(`subgraph "root" {` + "\n") - g.writeBody(opts, &w) - - // cluster isn't really used other than for naming purposes in some graphs - opts.cluster = opts.MaxDepth != 0 - maxDepth := opts.MaxDepth - if maxDepth == 0 { - maxDepth = -1 - } - - for _, s := range g.Subgraphs { - g.writeSubgraph(s, opts, maxDepth, &w) - } - - w.Unindent() - w.WriteString("}\n") - return w.Bytes() -} - -func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte { - var buf bytes.Buffer - graphName := g.Name - if graphName == "" { - graphName = "root" - } - - name := v.Name - attrs := v.Attrs - if v.graphNodeDotter != nil { - node := v.graphNodeDotter.DotNode(name, opts) - if node == nil { - return []byte{} - } - - newAttrs := make(map[string]string) - for k, v := range attrs { - newAttrs[k] = v - } - for k, v := range node.Attrs { - newAttrs[k] = v - } - - name = node.Name - attrs = newAttrs - } - - buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name)) - writeAttrs(&buf, attrs) - buf.WriteByte('\n') - - return buf.Bytes() -} - -func (e *marshalEdge) dot(g *marshalGraph) string { - var buf bytes.Buffer - graphName := g.Name - if graphName == "" { - graphName = "root" - } - - sourceName := g.vertexByID(e.Source).Name - targetName := g.vertexByID(e.Target).Name - s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName) - buf.WriteString(s) - writeAttrs(&buf, e.Attrs) - - return buf.String() -} - -func cycleDot(e *marshalEdge, g *marshalGraph) string { - return e.dot(g) + ` [color = "red", penwidth = "2.0"]` -} - -// Write the subgraph body. The is recursive, and the depth argument is used to -// record the current depth of iteration. 
-func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) { - if depth == 0 { - return - } - depth-- - - name := sg.Name - if opts.cluster { - // we prefix with cluster_ to match the old dot output - name = "cluster_" + name - sg.Attrs["label"] = sg.Name - } - w.WriteString(fmt.Sprintf("subgraph %q {\n", name)) - sg.writeBody(opts, w) - - for _, sg := range sg.Subgraphs { - g.writeSubgraph(sg, opts, depth, w) - } -} - -func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) { - w.Indent() - - for _, as := range attrStrings(g.Attrs) { - w.WriteString(as + "\n") - } - - // list of Vertices that aren't to be included in the dot output - skip := map[string]bool{} - - for _, v := range g.Vertices { - if v.graphNodeDotter == nil { - skip[v.ID] = true - continue - } - - w.Write(v.dot(g, opts)) - } - - var dotEdges []string - - if opts.DrawCycles { - for _, c := range g.Cycles { - if len(c) < 2 { - continue - } - - for i, j := 0, 1; i < len(c); i, j = i+1, j+1 { - if j >= len(c) { - j = 0 - } - src := c[i] - tgt := c[j] - - if skip[src.ID] || skip[tgt.ID] { - continue - } - - e := &marshalEdge{ - Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name), - Source: src.ID, - Target: tgt.ID, - Attrs: make(map[string]string), - } - - dotEdges = append(dotEdges, cycleDot(e, g)) - src = tgt - } - } - } - - for _, e := range g.Edges { - dotEdges = append(dotEdges, e.dot(g)) - } - - // srot these again to match the old output - sort.Strings(dotEdges) - - for _, e := range dotEdges { - w.WriteString(e + "\n") - } - - w.Unindent() - w.WriteString("}\n") -} - -func writeAttrs(buf *bytes.Buffer, attrs map[string]string) { - if len(attrs) > 0 { - buf.WriteString(" [") - buf.WriteString(strings.Join(attrStrings(attrs), ", ")) - buf.WriteString("]") - } -} - -func attrStrings(attrs map[string]string) []string { - strings := make([]string, 0, len(attrs)) - for k, v := range attrs { - strings = append(strings, fmt.Sprintf("%s = %q", k, v)) - } - sort.Strings(strings) - return strings -} - -// Provide a bytes.Buffer like structure, which will indent when starting a -// newline. -type indentWriter struct { - bytes.Buffer - level int -} - -func (w *indentWriter) indent() { - newline := []byte("\n") - if !bytes.HasSuffix(w.Bytes(), newline) { - return - } - for i := 0; i < w.level; i++ { - w.Buffer.WriteString("\t") - } -} - -// Indent increases indentation by 1 -func (w *indentWriter) Indent() { w.level++ } - -// Unindent decreases indentation by 1 -func (w *indentWriter) Unindent() { w.level-- } - -// the following methods intercecpt the byte.Buffer writes and insert the -// indentation when starting a new line. -func (w *indentWriter) Write(b []byte) (int, error) { - w.indent() - return w.Buffer.Write(b) -} - -func (w *indentWriter) WriteString(s string) (int, error) { - w.indent() - return w.Buffer.WriteString(s) -} -func (w *indentWriter) WriteByte(b byte) error { - w.indent() - return w.Buffer.WriteByte(b) -} -func (w *indentWriter) WriteRune(r rune) (int, error) { - w.indent() - return w.Buffer.WriteRune(r) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go deleted file mode 100644 index f0d99ee3..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/edge.go +++ /dev/null @@ -1,37 +0,0 @@ -package dag - -import ( - "fmt" -) - -// Edge represents an edge in the graph, with a source and target vertex. 
-type Edge interface { - Source() Vertex - Target() Vertex - - Hashable -} - -// BasicEdge returns an Edge implementation that simply tracks the source -// and target given as-is. -func BasicEdge(source, target Vertex) Edge { - return &basicEdge{S: source, T: target} -} - -// basicEdge is a basic implementation of Edge that has the source and -// target vertex. -type basicEdge struct { - S, T Vertex -} - -func (e *basicEdge) Hashcode() interface{} { - return fmt.Sprintf("%p-%p", e.S, e.T) -} - -func (e *basicEdge) Source() Vertex { - return e.S -} - -func (e *basicEdge) Target() Vertex { - return e.T -} diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go deleted file mode 100644 index e7517a20..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/graph.go +++ /dev/null @@ -1,391 +0,0 @@ -package dag - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "sort" -) - -// Graph is used to represent a dependency graph. -type Graph struct { - vertices *Set - edges *Set - downEdges map[interface{}]*Set - upEdges map[interface{}]*Set - - // JSON encoder for recording debug information - debug *encoder -} - -// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. -type Subgrapher interface { - Subgraph() Grapher -} - -// A Grapher is any type that returns a Grapher, mainly used to identify -// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they -// return themselves. -type Grapher interface { - DirectedGraph() Grapher -} - -// Vertex of the graph. -type Vertex interface{} - -// NamedVertex is an optional interface that can be implemented by Vertex -// to give it a human-friendly name that is used for outputting the graph. -type NamedVertex interface { - Vertex - Name() string -} - -func (g *Graph) DirectedGraph() Grapher { - return g -} - -// Vertices returns the list of all the vertices in the graph. -func (g *Graph) Vertices() []Vertex { - list := g.vertices.List() - result := make([]Vertex, len(list)) - for i, v := range list { - result[i] = v.(Vertex) - } - - return result -} - -// Edges returns the list of all the edges in the graph. -func (g *Graph) Edges() []Edge { - list := g.edges.List() - result := make([]Edge, len(list)) - for i, v := range list { - result[i] = v.(Edge) - } - - return result -} - -// EdgesFrom returns the list of edges from the given source. -func (g *Graph) EdgesFrom(v Vertex) []Edge { - var result []Edge - from := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Source()) == from { - result = append(result, e) - } - } - - return result -} - -// EdgesTo returns the list of edges to the given target. -func (g *Graph) EdgesTo(v Vertex) []Edge { - var result []Edge - search := hashcode(v) - for _, e := range g.Edges() { - if hashcode(e.Target()) == search { - result = append(result, e) - } - } - - return result -} - -// HasVertex checks if the given Vertex is present in the graph. -func (g *Graph) HasVertex(v Vertex) bool { - return g.vertices.Include(v) -} - -// HasEdge checks if the given Edge is present in the graph. -func (g *Graph) HasEdge(e Edge) bool { - return g.edges.Include(e) -} - -// Add adds a vertex to the graph. This is safe to call multiple time with -// the same Vertex. -func (g *Graph) Add(v Vertex) Vertex { - g.init() - g.vertices.Add(v) - g.debug.Add(v) - return v -} - -// Remove removes a vertex from the graph. This will also remove any -// edges with this vertex as a source or target. 
-func (g *Graph) Remove(v Vertex) Vertex { - // Delete the vertex itself - g.vertices.Delete(v) - g.debug.Remove(v) - - // Delete the edges to non-existent things - for _, target := range g.DownEdges(v).List() { - g.RemoveEdge(BasicEdge(v, target)) - } - for _, source := range g.UpEdges(v).List() { - g.RemoveEdge(BasicEdge(source, v)) - } - - return nil -} - -// Replace replaces the original Vertex with replacement. If the original -// does not exist within the graph, then false is returned. Otherwise, true -// is returned. -func (g *Graph) Replace(original, replacement Vertex) bool { - // If we don't have the original, we can't do anything - if !g.vertices.Include(original) { - return false - } - - defer g.debug.BeginOperation("Replace", "").End("") - - // If they're the same, then don't do anything - if original == replacement { - return true - } - - // Add our new vertex, then copy all the edges - g.Add(replacement) - for _, target := range g.DownEdges(original).List() { - g.Connect(BasicEdge(replacement, target)) - } - for _, source := range g.UpEdges(original).List() { - g.Connect(BasicEdge(source, replacement)) - } - - // Remove our old vertex, which will also remove all the edges - g.Remove(original) - - return true -} - -// RemoveEdge removes an edge from the graph. -func (g *Graph) RemoveEdge(edge Edge) { - g.init() - g.debug.RemoveEdge(edge) - - // Delete the edge from the set - g.edges.Delete(edge) - - // Delete the up/down edges - if s, ok := g.downEdges[hashcode(edge.Source())]; ok { - s.Delete(edge.Target()) - } - if s, ok := g.upEdges[hashcode(edge.Target())]; ok { - s.Delete(edge.Source()) - } -} - -// DownEdges returns the outward edges from the source Vertex v. -func (g *Graph) DownEdges(v Vertex) *Set { - g.init() - return g.downEdges[hashcode(v)] -} - -// UpEdges returns the inward edges to the destination Vertex v. -func (g *Graph) UpEdges(v Vertex) *Set { - g.init() - return g.upEdges[hashcode(v)] -} - -// Connect adds an edge with the given source and target. This is safe to -// call multiple times with the same value. Note that the same value is -// verified through pointer equality of the vertices, not through the -// value of the edge itself. -func (g *Graph) Connect(edge Edge) { - g.init() - g.debug.Connect(edge) - - source := edge.Source() - target := edge.Target() - sourceCode := hashcode(source) - targetCode := hashcode(target) - - // Do we have this already? If so, don't add it again. - if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { - return - } - - // Add the edge to the set - g.edges.Add(edge) - - // Add the down edge - s, ok := g.downEdges[sourceCode] - if !ok { - s = new(Set) - g.downEdges[sourceCode] = s - } - s.Add(target) - - // Add the up edge - s, ok = g.upEdges[targetCode] - if !ok { - s = new(Set) - g.upEdges[targetCode] = s - } - s.Add(source) -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) StringWithNodeTypes() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... 
- for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - targetNodes := make(map[string]Vertex) - for _, target := range targets.List() { - dep := VertexName(target) - deps = append(deps, dep) - targetNodes[dep] = target - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) - } - } - - return buf.String() -} - -// String outputs some human-friendly output for the graph structure. -func (g *Graph) String() string { - var buf bytes.Buffer - - // Build the list of node names and a mapping so that we can more - // easily alphabetize the output to remain deterministic. - vertices := g.Vertices() - names := make([]string, 0, len(vertices)) - mapping := make(map[string]Vertex, len(vertices)) - for _, v := range vertices { - name := VertexName(v) - names = append(names, name) - mapping[name] = v - } - sort.Strings(names) - - // Write each node in order... - for _, name := range names { - v := mapping[name] - targets := g.downEdges[hashcode(v)] - - buf.WriteString(fmt.Sprintf("%s\n", name)) - - // Alphabetize dependencies - deps := make([]string, 0, targets.Len()) - for _, target := range targets.List() { - deps = append(deps, VertexName(target)) - } - sort.Strings(deps) - - // Write dependencies - for _, d := range deps { - buf.WriteString(fmt.Sprintf(" %s\n", d)) - } - } - - return buf.String() -} - -func (g *Graph) init() { - if g.vertices == nil { - g.vertices = new(Set) - } - if g.edges == nil { - g.edges = new(Set) - } - if g.downEdges == nil { - g.downEdges = make(map[interface{}]*Set) - } - if g.upEdges == nil { - g.upEdges = make(map[interface{}]*Set) - } -} - -// Dot returns a dot-formatted representation of the Graph. -func (g *Graph) Dot(opts *DotOpts) []byte { - return newMarshalGraph("", g).Dot(opts) -} - -// MarshalJSON returns a JSON representation of the entire Graph. -func (g *Graph) MarshalJSON() ([]byte, error) { - dg := newMarshalGraph("root", g) - return json.MarshalIndent(dg, "", " ") -} - -// SetDebugWriter sets the io.Writer where the Graph will record debug -// information. After this is set, the graph will immediately encode itself to -// the stream, and continue to record all subsequent operations. -func (g *Graph) SetDebugWriter(w io.Writer) { - g.debug = &encoder{w: w} - g.debug.Encode(newMarshalGraph("root", g)) -} - -// DebugVertexInfo encodes arbitrary information about a vertex in the graph -// debug logs. -func (g *Graph) DebugVertexInfo(v Vertex, info string) { - va := newVertexInfo(typeVertexInfo, v, info) - g.debug.Encode(va) -} - -// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug -// logs. -func (g *Graph) DebugEdgeInfo(e Edge, info string) { - ea := newEdgeInfo(typeEdgeInfo, e, info) - g.debug.Encode(ea) -} - -// DebugVisitInfo records a visit to a Vertex during a walk operation. -func (g *Graph) DebugVisitInfo(v Vertex, info string) { - vi := newVertexInfo(typeVisitInfo, v, info) - g.debug.Encode(vi) -} - -// DebugOperation marks the start of a set of graph transformations in -// the debug log, and returns a DebugOperationEnd func, which marks the end of -// the operation in the log. Additional information can be added to the log via -// the info parameter. 
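// Illustrative sketch, not part of this patch: rendering a Graph with the String and
// MarshalJSON helpers above. Note that Dot output only includes vertices that
// implement GraphNodeDotter, so plain string vertices would be skipped there.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.Graph
	g.Add("alks_iamrole.test_role")
	g.Add("provider.alks")
	g.Connect(dag.BasicEdge("alks_iamrole.test_role", "provider.alks"))

	fmt.Print(g.String()) // deterministic, alphabetized adjacency listing

	js, err := g.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // same graph encoded via marshalGraph (defined below)
}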
-// -// The returned func's End method allows this method to be called from a single -// defer statement: -// defer g.DebugOperationBegin("OpName", "operating").End("") -// -// The returned function must be called to properly close the logical operation -// in the logs. -func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { - return g.debug.BeginOperation(operation, info) -} - -// VertexName returns the name of a vertex. -func VertexName(raw Vertex) string { - switch v := raw.(type) { - case NamedVertex: - return v.Name() - case fmt.Stringer: - return fmt.Sprintf("%s", v) - default: - return fmt.Sprintf("%v", v) - } -} diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go deleted file mode 100644 index c567d271..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/marshal.go +++ /dev/null @@ -1,474 +0,0 @@ -package dag - -import ( - "encoding/json" - "fmt" - "io" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -const ( - typeOperation = "Operation" - typeTransform = "Transform" - typeWalk = "Walk" - typeDepthFirstWalk = "DepthFirstWalk" - typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" - typeTransitiveReduction = "TransitiveReduction" - typeEdgeInfo = "EdgeInfo" - typeVertexInfo = "VertexInfo" - typeVisitInfo = "VisitInfo" -) - -// the marshal* structs are for serialization of the graph data. -type marshalGraph struct { - // Type is always "Graph", for identification as a top level object in the - // JSON stream. - Type string - - // Each marshal structure requires a unique ID so that it can be referenced - // by other structures. - ID string `json:",omitempty"` - - // Human readable name for this graph. - Name string `json:",omitempty"` - - // Arbitrary attributes that can be added to the output. - Attrs map[string]string `json:",omitempty"` - - // List of graph vertices, sorted by ID. - Vertices []*marshalVertex `json:",omitempty"` - - // List of edges, sorted by Source ID. - Edges []*marshalEdge `json:",omitempty"` - - // Any number of subgraphs. A subgraph itself is considered a vertex, and - // may be referenced by either end of an edge. - Subgraphs []*marshalGraph `json:",omitempty"` - - // Any lists of vertices that are included in cycles. - Cycles [][]*marshalVertex `json:",omitempty"` -} - -// The add, remove, connect, removeEdge methods mirror the basic Graph -// manipulations to reconstruct a marshalGraph from a debug log. -func (g *marshalGraph) add(v *marshalVertex) { - g.Vertices = append(g.Vertices, v) - sort.Sort(vertices(g.Vertices)) -} - -func (g *marshalGraph) remove(v *marshalVertex) { - for i, existing := range g.Vertices { - if v.ID == existing.ID { - g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) - return - } - } -} - -func (g *marshalGraph) connect(e *marshalEdge) { - g.Edges = append(g.Edges, e) - sort.Sort(edges(g.Edges)) -} - -func (g *marshalGraph) removeEdge(e *marshalEdge) { - for i, existing := range g.Edges { - if e.Source == existing.Source && e.Target == existing.Target { - g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) - return - } - } -} - -func (g *marshalGraph) vertexByID(id string) *marshalVertex { - for _, v := range g.Vertices { - if id == v.ID { - return v - } - } - return nil -} - -type marshalVertex struct { - // Unique ID, used to reference this vertex from other structures. 
- ID string - - // Human readable name - Name string `json:",omitempty"` - - Attrs map[string]string `json:",omitempty"` - - // This is to help transition from the old Dot interfaces. We record if the - // node was a GraphNodeDotter here, so we can call it to get attributes. - graphNodeDotter GraphNodeDotter -} - -func newMarshalVertex(v Vertex) *marshalVertex { - dn, ok := v.(GraphNodeDotter) - if !ok { - dn = nil - } - - return &marshalVertex{ - ID: marshalVertexID(v), - Name: VertexName(v), - Attrs: make(map[string]string), - graphNodeDotter: dn, - } -} - -// vertices is a sort.Interface implementation for sorting vertices by ID -type vertices []*marshalVertex - -func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } -func (v vertices) Len() int { return len(v) } -func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -type marshalEdge struct { - // Human readable name - Name string - - // Source and Target Vertices by ID - Source string - Target string - - Attrs map[string]string `json:",omitempty"` -} - -func newMarshalEdge(e Edge) *marshalEdge { - return &marshalEdge{ - Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), - Source: marshalVertexID(e.Source()), - Target: marshalVertexID(e.Target()), - Attrs: make(map[string]string), - } -} - -// edges is a sort.Interface implementation for sorting edges by Source ID -type edges []*marshalEdge - -func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } -func (e edges) Len() int { return len(e) } -func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } - -// build a marshalGraph structure from a *Graph -func newMarshalGraph(name string, g *Graph) *marshalGraph { - mg := &marshalGraph{ - Type: "Graph", - Name: name, - Attrs: make(map[string]string), - } - - for _, v := range g.Vertices() { - id := marshalVertexID(v) - if sg, ok := marshalSubgrapher(v); ok { - smg := newMarshalGraph(VertexName(v), sg) - smg.ID = id - mg.Subgraphs = append(mg.Subgraphs, smg) - } - - mv := newMarshalVertex(v) - mg.Vertices = append(mg.Vertices, mv) - } - - sort.Sort(vertices(mg.Vertices)) - - for _, e := range g.Edges() { - mg.Edges = append(mg.Edges, newMarshalEdge(e)) - } - - sort.Sort(edges(mg.Edges)) - - for _, c := range (&AcyclicGraph{*g}).Cycles() { - var cycle []*marshalVertex - for _, v := range c { - mv := newMarshalVertex(v) - cycle = append(cycle, mv) - } - mg.Cycles = append(mg.Cycles, cycle) - } - - return mg -} - -// Attempt to return a unique ID for any vertex. -func marshalVertexID(v Vertex) string { - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return strconv.Itoa(int(val.Pointer())) - case reflect.Interface: - return strconv.Itoa(int(val.InterfaceData()[1])) - } - - if v, ok := v.(Hashable); ok { - h := v.Hashcode() - if h, ok := h.(string); ok { - return h - } - } - - // fallback to a name, which we hope is unique. - return VertexName(v) - - // we could try harder by attempting to read the arbitrary value from the - // interface, but we shouldn't get here from terraform right now. -} - -// check for a Subgrapher, and return the underlying *Graph. 
-func marshalSubgrapher(v Vertex) (*Graph, bool) { - sg, ok := v.(Subgrapher) - if !ok { - return nil, false - } - - switch g := sg.Subgraph().DirectedGraph().(type) { - case *Graph: - return g, true - case *AcyclicGraph: - return &g.Graph, true - } - - return nil, false -} - -// The DebugOperationEnd func type provides a way to call an End function via a -// method call, allowing for the chaining of methods in a defer statement. -type DebugOperationEnd func(string) - -// End calls function e with the info parameter, marking the end of this -// operation in the logs. -func (e DebugOperationEnd) End(info string) { e(info) } - -// encoder provides methods to write debug data to an io.Writer, and is a noop -// when no writer is present -type encoder struct { - sync.Mutex - w io.Writer -} - -// Encode is analogous to json.Encoder.Encode -func (e *encoder) Encode(i interface{}) { - if e == nil || e.w == nil { - return - } - e.Lock() - defer e.Unlock() - - js, err := json.Marshal(i) - if err != nil { - log.Println("[ERROR] dag:", err) - return - } - js = append(js, '\n') - - _, err = e.w.Write(js) - if err != nil { - log.Println("[ERROR] dag:", err) - return - } -} - -func (e *encoder) Add(v Vertex) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - AddVertex: newMarshalVertex(v), - }) -} - -// Remove records the removal of Vertex v. -func (e *encoder) Remove(v Vertex) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - RemoveVertex: newMarshalVertex(v), - }) -} - -func (e *encoder) Connect(edge Edge) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - AddEdge: newMarshalEdge(edge), - }) -} - -func (e *encoder) RemoveEdge(edge Edge) { - if e == nil { - return - } - e.Encode(marshalTransform{ - Type: typeTransform, - RemoveEdge: newMarshalEdge(edge), - }) -} - -// BeginOperation marks the start of set of graph transformations, and returns -// an EndDebugOperation func to be called once the opration is complete. -func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd { - if e == nil { - return func(string) {} - } - - e.Encode(marshalOperation{ - Type: typeOperation, - Begin: op, - Info: info, - }) - - return func(info string) { - e.Encode(marshalOperation{ - Type: typeOperation, - End: op, - Info: info, - }) - } -} - -// structure for recording graph transformations -type marshalTransform struct { - // Type: "Transform" - Type string - AddEdge *marshalEdge `json:",omitempty"` - RemoveEdge *marshalEdge `json:",omitempty"` - AddVertex *marshalVertex `json:",omitempty"` - RemoveVertex *marshalVertex `json:",omitempty"` -} - -func (t marshalTransform) Transform(g *marshalGraph) { - switch { - case t.AddEdge != nil: - g.connect(t.AddEdge) - case t.RemoveEdge != nil: - g.removeEdge(t.RemoveEdge) - case t.AddVertex != nil: - g.add(t.AddVertex) - case t.RemoveVertex != nil: - g.remove(t.RemoveVertex) - } -} - -// this structure allows us to decode any object in the json stream for -// inspection, then re-decode it into a proper struct if needed. -type streamDecode struct { - Type string - Map map[string]interface{} - JSON []byte -} - -func (s *streamDecode) UnmarshalJSON(d []byte) error { - s.JSON = d - err := json.Unmarshal(d, &s.Map) - if err != nil { - return err - } - - if t, ok := s.Map["Type"]; ok { - s.Type, _ = t.(string) - } - return nil -} - -// structure for recording the beginning and end of any multi-step -// transformations. 
These are informational, and not required to reproduce the -// graph state. -type marshalOperation struct { - Type string - Begin string `json:",omitempty"` - End string `json:",omitempty"` - Info string `json:",omitempty"` -} - -// decodeGraph decodes a marshalGraph from an encoded graph stream. -func decodeGraph(r io.Reader) (*marshalGraph, error) { - dec := json.NewDecoder(r) - - // a stream should always start with a graph - g := &marshalGraph{} - - err := dec.Decode(g) - if err != nil { - return nil, err - } - - // now replay any operations that occurred on the original graph - for dec.More() { - s := &streamDecode{} - err := dec.Decode(s) - if err != nil { - return g, err - } - - // the only Type we're concerned with here is Transform to complete the - // Graph - if s.Type != typeTransform { - continue - } - - t := &marshalTransform{} - err = json.Unmarshal(s.JSON, t) - if err != nil { - return g, err - } - t.Transform(g) - } - return g, nil -} - -// marshalVertexInfo allows encoding arbitrary information about the a single -// Vertex in the logs. These are accumulated for informational display while -// rebuilding the graph. -type marshalVertexInfo struct { - Type string - Vertex *marshalVertex - Info string -} - -func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo { - return &marshalVertexInfo{ - Type: infoType, - Vertex: newMarshalVertex(v), - Info: info, - } -} - -// marshalEdgeInfo allows encoding arbitrary information about the a single -// Edge in the logs. These are accumulated for informational display while -// rebuilding the graph. -type marshalEdgeInfo struct { - Type string - Edge *marshalEdge - Info string -} - -func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo { - return &marshalEdgeInfo{ - Type: infoType, - Edge: newMarshalEdge(e), - Info: info, - } -} - -// JSON2Dot reads a Graph debug log from and io.Reader, and converts the final -// graph dot format. -// -// TODO: Allow returning the output at a certain point during decode. -// Encode extra information from the json log into the Dot. -func JSON2Dot(r io.Reader) ([]byte, error) { - g, err := decodeGraph(r) - if err != nil { - return nil, err - } - - return g.Dot(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go deleted file mode 100644 index 92b42151..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/set.go +++ /dev/null @@ -1,123 +0,0 @@ -package dag - -import ( - "sync" -) - -// Set is a set data structure. -type Set struct { - m map[interface{}]interface{} - once sync.Once -} - -// Hashable is the interface used by set to get the hash code of a value. -// If this isn't given, then the value of the item being added to the set -// itself is used as the comparison value. -type Hashable interface { - Hashcode() interface{} -} - -// hashcode returns the hashcode used for set elements. -func hashcode(v interface{}) interface{} { - if h, ok := v.(Hashable); ok { - return h.Hashcode() - } - - return v -} - -// Add adds an item to the set -func (s *Set) Add(v interface{}) { - s.once.Do(s.init) - s.m[hashcode(v)] = v -} - -// Delete removes an item from the set. -func (s *Set) Delete(v interface{}) { - s.once.Do(s.init) - delete(s.m, hashcode(v)) -} - -// Include returns true/false of whether a value is in the set. -func (s *Set) Include(v interface{}) bool { - s.once.Do(s.init) - _, ok := s.m[hashcode(v)] - return ok -} - -// Intersection computes the set intersection with other. 
-func (s *Set) Intersection(other *Set) *Set { - result := new(Set) - if s == nil { - return result - } - if other != nil { - for _, v := range s.m { - if other.Include(v) { - result.Add(v) - } - } - } - - return result -} - -// Difference returns a set with the elements that s has but -// other doesn't. -func (s *Set) Difference(other *Set) *Set { - result := new(Set) - if s != nil { - for k, v := range s.m { - var ok bool - if other != nil { - _, ok = other.m[k] - } - if !ok { - result.Add(v) - } - } - } - - return result -} - -// Filter returns a set that contains the elements from the receiver -// where the given callback returns true. -func (s *Set) Filter(cb func(interface{}) bool) *Set { - result := new(Set) - - for _, v := range s.m { - if cb(v) { - result.Add(v) - } - } - - return result -} - -// Len is the number of items in the set. -func (s *Set) Len() int { - if s == nil { - return 0 - } - - return len(s.m) -} - -// List returns the list of set elements. -func (s *Set) List() []interface{} { - if s == nil { - return nil - } - - r := make([]interface{}, 0, len(s.m)) - for _, v := range s.m { - r = append(r, v) - } - - return r -} - -func (s *Set) init() { - s.m = make(map[interface{}]interface{}) -} diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go deleted file mode 100644 index 9d8b25ce..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/tarjan.go +++ /dev/null @@ -1,107 +0,0 @@ -package dag - -// StronglyConnected returns the list of strongly connected components -// within the Graph g. This information is primarily used by this package -// for cycle detection, but strongly connected components have widespread -// use. -func StronglyConnected(g *Graph) [][]Vertex { - vs := g.Vertices() - acct := sccAcct{ - NextIndex: 1, - VertexIndex: make(map[Vertex]int, len(vs)), - } - for _, v := range vs { - // Recurse on any non-visited nodes - if acct.VertexIndex[v] == 0 { - stronglyConnected(&acct, g, v) - } - } - return acct.SCC -} - -func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int { - // Initial vertex visit - index := acct.visit(v) - minIdx := index - - for _, raw := range g.DownEdges(v).List() { - target := raw.(Vertex) - targetIdx := acct.VertexIndex[target] - - // Recurse on successor if not yet visited - if targetIdx == 0 { - minIdx = min(minIdx, stronglyConnected(acct, g, target)) - } else if acct.inStack(target) { - // Check if the vertex is in the stack - minIdx = min(minIdx, targetIdx) - } - } - - // Pop the strongly connected components off the stack if - // this is a root vertex - if index == minIdx { - var scc []Vertex - for { - v2 := acct.pop() - scc = append(scc, v2) - if v2 == v { - break - } - } - - acct.SCC = append(acct.SCC, scc) - } - - return minIdx -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -// sccAcct is used ot pass around accounting information for -// the StronglyConnectedComponents algorithm -type sccAcct struct { - NextIndex int - VertexIndex map[Vertex]int - Stack []Vertex - SCC [][]Vertex -} - -// visit assigns an index and pushes a vertex onto the stack -func (s *sccAcct) visit(v Vertex) int { - idx := s.NextIndex - s.VertexIndex[v] = idx - s.NextIndex++ - s.push(v) - return idx -} - -// push adds a vertex to the stack -func (s *sccAcct) push(n Vertex) { - s.Stack = append(s.Stack, n) -} - -// pop removes a vertex from the stack -func (s *sccAcct) pop() Vertex { - n := len(s.Stack) - if n == 0 { - return nil - } - vertex := 
s.Stack[n-1] - s.Stack = s.Stack[:n-1] - return vertex -} - -// inStack checks if a vertex is in the stack -func (s *sccAcct) inStack(needle Vertex) bool { - for _, n := range s.Stack { - if n == needle { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go deleted file mode 100644 index 509d76a3..00000000 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ /dev/null @@ -1,459 +0,0 @@ -package dag - -import ( - "errors" - "log" - "sync" - "time" - - "github.com/hashicorp/terraform/tfdiags" -) - -// Walker is used to walk every vertex of a graph in parallel. -// -// A vertex will only be walked when the dependencies of that vertex have -// been walked. If two vertices can be walked at the same time, they will be. -// -// Update can be called to update the graph. This can be called even during -// a walk, changing vertices/edges mid-walk. This should be done carefully. -// If a vertex is removed but has already been executed, the result of that -// execution (any error) is still returned by Wait. Changing or re-adding -// a vertex that has already executed has no effect. Changing edges of -// a vertex that has already executed has no effect. -// -// Non-parallelism can be enforced by introducing a lock in your callback -// function. However, the goroutine overhead of a walk will remain. -// Walker will create V*2 goroutines (one for each vertex, and dependency -// waiter for each vertex). In general this should be of no concern unless -// there are a huge number of vertices. -// -// The walk is depth first by default. This can be changed with the Reverse -// option. -// -// A single walker is only valid for one graph walk. After the walk is complete -// you must construct a new walker to walk again. State for the walk is never -// deleted in case vertices or edges are changed. -type Walker struct { - // Callback is what is called for each vertex - Callback WalkFunc - - // Reverse, if true, causes the source of an edge to depend on a target. - // When false (default), the target depends on the source. - Reverse bool - - // changeLock must be held to modify any of the fields below. Only Update - // should modify these fields. Modifying them outside of Update can cause - // serious problems. - changeLock sync.Mutex - vertices Set - edges Set - vertexMap map[Vertex]*walkerVertex - - // wait is done when all vertices have executed. It may become "undone" - // if new vertices are added. - wait sync.WaitGroup - - // diagsMap contains the diagnostics recorded so far for execution, - // and upstreamFailed contains all the vertices whose problems were - // caused by upstream failures, and thus whose diagnostics should be - // excluded from the final set. - // - // Readers and writers of either map must hold diagsLock. - diagsMap map[Vertex]tfdiags.Diagnostics - upstreamFailed map[Vertex]struct{} - diagsLock sync.Mutex -} - -type walkerVertex struct { - // These should only be set once on initialization and never written again. - // They are not protected by a lock since they don't need to be since - // they are write-once. - - // DoneCh is closed when this vertex has completed execution, regardless - // of success. - // - // CancelCh is closed when the vertex should cancel execution. If execution - // is already complete (DoneCh is closed), this has no effect. Otherwise, - // execution is cancelled as quickly as possible. 
- DoneCh chan struct{} - CancelCh chan struct{} - - // Dependency information. Any changes to any of these fields requires - // holding DepsLock. - // - // DepsCh is sent a single value that denotes whether the upstream deps - // were successful (no errors). Any value sent means that the upstream - // dependencies are complete. No other values will ever be sent again. - // - // DepsUpdateCh is closed when there is a new DepsCh set. - DepsCh chan bool - DepsUpdateCh chan struct{} - DepsLock sync.Mutex - - // Below is not safe to read/write in parallel. This behavior is - // enforced by changes only happening in Update. Nothing else should - // ever modify these. - deps map[Vertex]chan struct{} - depsCancelCh chan struct{} -} - -// errWalkUpstream is used in the errMap of a walk to note that an upstream -// dependency failed so this vertex wasn't run. This is not shown in the final -// user-returned error. -var errWalkUpstream = errors.New("upstream dependency failed") - -// Wait waits for the completion of the walk and returns diagnostics describing -// any problems that arose. Update should be called to populate the walk with -// vertices and edges prior to calling this. -// -// Wait will return as soon as all currently known vertices are complete. -// If you plan on calling Update with more vertices in the future, you -// should not call Wait until after this is done. -func (w *Walker) Wait() tfdiags.Diagnostics { - // Wait for completion - w.wait.Wait() - - var diags tfdiags.Diagnostics - w.diagsLock.Lock() - for v, vDiags := range w.diagsMap { - if _, upstream := w.upstreamFailed[v]; upstream { - // Ignore diagnostics for nodes that had failed upstreams, since - // the downstream diagnostics are likely to be redundant. - continue - } - diags = diags.Append(vDiags) - } - w.diagsLock.Unlock() - - return diags -} - -// Update updates the currently executing walk with the given graph. -// This will perform a diff of the vertices and edges and update the walker. -// Already completed vertices remain completed (including any errors during -// their execution). -// -// This returns immediately once the walker is updated; it does not wait -// for completion of the walk. -// -// Multiple Updates can be called in parallel. Update can be called at any -// time during a walk. -func (w *Walker) Update(g *AcyclicGraph) { - log.Print("[TRACE] dag/walk: updating graph") - var v, e *Set - if g != nil { - v, e = g.vertices, g.edges - } - - // Grab the change lock so no more updates happen but also so that - // no new vertices are executed during this time since we may be - // removing them. 
- w.changeLock.Lock() - defer w.changeLock.Unlock() - - // Initialize fields - if w.vertexMap == nil { - w.vertexMap = make(map[Vertex]*walkerVertex) - } - - // Calculate all our sets - newEdges := e.Difference(&w.edges) - oldEdges := w.edges.Difference(e) - newVerts := v.Difference(&w.vertices) - oldVerts := w.vertices.Difference(v) - - // Add the new vertices - for _, raw := range newVerts.List() { - v := raw.(Vertex) - - // Add to the waitgroup so our walk is not done until everything finishes - w.wait.Add(1) - - // Add to our own set so we know about it already - log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) - w.vertices.Add(raw) - - // Initialize the vertex info - info := &walkerVertex{ - DoneCh: make(chan struct{}), - CancelCh: make(chan struct{}), - deps: make(map[Vertex]chan struct{}), - } - - // Add it to the map and kick off the walk - w.vertexMap[v] = info - } - - // Remove the old vertices - for _, raw := range oldVerts.List() { - v := raw.(Vertex) - - // Get the vertex info so we can cancel it - info, ok := w.vertexMap[v] - if !ok { - // This vertex for some reason was never in our map. This - // shouldn't be possible. - continue - } - - // Cancel the vertex - close(info.CancelCh) - - // Delete it out of the map - delete(w.vertexMap, v) - - log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) - w.vertices.Delete(raw) - } - - // Add the new edges - var changedDeps Set - for _, raw := range newEdges.List() { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Get the info for the dep - depInfo, ok := w.vertexMap[dep] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Add the dependency to our waiter - waiterInfo.deps[dep] = depInfo.DoneCh - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - - log.Printf( - "[TRACE] dag/walk: added edge: %q waiting on %q", - VertexName(waiter), VertexName(dep)) - w.edges.Add(raw) - } - - // Process reoved edges - for _, raw := range oldEdges.List() { - edge := raw.(Edge) - waiter, dep := w.edgeParts(edge) - - // Get the info for the waiter - waiterInfo, ok := w.vertexMap[waiter] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. - continue - } - - // Delete the dependency from the waiter - delete(waiterInfo.deps, dep) - - // Record that the deps changed for this waiter - changedDeps.Add(waiter) - - log.Printf( - "[TRACE] dag/walk: removed edge: %q waiting on %q", - VertexName(waiter), VertexName(dep)) - w.edges.Delete(raw) - } - - // For each vertex with changed dependencies, we need to kick off - // a new waiter and notify the vertex of the changes. - for _, raw := range changedDeps.List() { - v := raw.(Vertex) - info, ok := w.vertexMap[v] - if !ok { - // Vertex doesn't exist... shouldn't be possible but ignore. 
- continue - } - - // Create a new done channel - doneCh := make(chan bool, 1) - - // Create the channel we close for cancellation - cancelCh := make(chan struct{}) - - // Build a new deps copy - deps := make(map[Vertex]<-chan struct{}) - for k, v := range info.deps { - deps[k] = v - } - - // Update the update channel - info.DepsLock.Lock() - if info.DepsUpdateCh != nil { - close(info.DepsUpdateCh) - } - info.DepsCh = doneCh - info.DepsUpdateCh = make(chan struct{}) - info.DepsLock.Unlock() - - // Cancel the older waiter - if info.depsCancelCh != nil { - close(info.depsCancelCh) - } - info.depsCancelCh = cancelCh - - log.Printf( - "[TRACE] dag/walk: dependencies changed for %q, sending new deps", - VertexName(v)) - - // Start the waiter - go w.waitDeps(v, deps, doneCh, cancelCh) - } - - // Start all the new vertices. We do this at the end so that all - // the edge waiters and changes are setup above. - for _, raw := range newVerts.List() { - v := raw.(Vertex) - go w.walkVertex(v, w.vertexMap[v]) - } -} - -// edgeParts returns the waiter and the dependency, in that order. -// The waiter is waiting on the dependency. -func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) { - if w.Reverse { - return e.Source(), e.Target() - } - - return e.Target(), e.Source() -} - -// walkVertex walks a single vertex, waiting for any dependencies before -// executing the callback. -func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { - // When we're done executing, lower the waitgroup count - defer w.wait.Done() - - // When we're done, always close our done channel - defer close(info.DoneCh) - - // Wait for our dependencies. We create a [closed] deps channel so - // that we can immediately fall through to load our actual DepsCh. - var depsSuccess bool - var depsUpdateCh chan struct{} - depsCh := make(chan bool, 1) - depsCh <- true - close(depsCh) - for { - select { - case <-info.CancelCh: - // Cancel - return - - case depsSuccess = <-depsCh: - // Deps complete! Mark as nil to trigger completion handling. - depsCh = nil - - case <-depsUpdateCh: - // New deps, reloop - } - - // Check if we have updated dependencies. This can happen if the - // dependencies were satisfied exactly prior to an Update occurring. - // In that case, we'd like to take into account new dependencies - // if possible. - info.DepsLock.Lock() - if info.DepsCh != nil { - depsCh = info.DepsCh - info.DepsCh = nil - } - if info.DepsUpdateCh != nil { - depsUpdateCh = info.DepsUpdateCh - } - info.DepsLock.Unlock() - - // If we still have no deps channel set, then we're done! - if depsCh == nil { - break - } - } - - // If we passed dependencies, we just want to check once more that - // we're not cancelled, since this can happen just as dependencies pass. - select { - case <-info.CancelCh: - // Cancelled during an update while dependencies completed. - return - default: - } - - // Run our callback or note that our upstream failed - var diags tfdiags.Diagnostics - var upstreamFailed bool - if depsSuccess { - log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) - diags = w.Callback(v) - } else { - log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) - // This won't be displayed to the user because we'll set upstreamFailed, - // but we need to ensure there's at least one error in here so that - // the failures will cascade downstream. 
- diags = diags.Append(errors.New("upstream dependencies failed")) - upstreamFailed = true - } - - // Record the result (we must do this after execution because we mustn't - // hold diagsLock while visiting a vertex.) - w.diagsLock.Lock() - if w.diagsMap == nil { - w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) - } - w.diagsMap[v] = diags - if w.upstreamFailed == nil { - w.upstreamFailed = make(map[Vertex]struct{}) - } - if upstreamFailed { - w.upstreamFailed[v] = struct{}{} - } - w.diagsLock.Unlock() -} - -func (w *Walker) waitDeps( - v Vertex, - deps map[Vertex]<-chan struct{}, - doneCh chan<- bool, - cancelCh <-chan struct{}) { - - // For each dependency given to us, wait for it to complete - for dep, depCh := range deps { - DepSatisfied: - for { - select { - case <-depCh: - // Dependency satisfied! - break DepSatisfied - - case <-cancelCh: - // Wait cancelled. Note that we didn't satisfy dependencies - // so that anything waiting on us also doesn't run. - doneCh <- false - return - - case <-time.After(time.Second * 5): - log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", - VertexName(v), VertexName(dep)) - } - } - } - - // Dependencies satisfied! We need to check if any errored - w.diagsLock.Lock() - defer w.diagsLock.Unlock() - for dep := range deps { - if w.diagsMap[dep].HasErrors() { - // One of our dependencies failed, so return false - doneCh <- false - return - } - } - - // All dependencies satisfied and successful - doneCh <- true -} diff --git a/vendor/github.com/hashicorp/terraform/experiments/doc.go b/vendor/github.com/hashicorp/terraform/experiments/doc.go deleted file mode 100644 index 5538d739..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package experiments contains the models and logic for opt-in experiments -// that can be activated for a particular Terraform module. -// -// We use experiments to get feedback on new configuration language features -// in a way that permits breaking changes without waiting for a future minor -// release. Any feature behind an experiment flag is subject to change in any -// way in even a patch release, until we have enough confidence about the -// design of the feature to make compatibility commitments about it. -package experiments diff --git a/vendor/github.com/hashicorp/terraform/experiments/errors.go b/vendor/github.com/hashicorp/terraform/experiments/errors.go deleted file mode 100644 index a1fdc6f5..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/errors.go +++ /dev/null @@ -1,26 +0,0 @@ -package experiments - -import ( - "fmt" -) - -// UnavailableError is the error type returned by GetCurrent when the requested -// experiment is not recognized at all. -type UnavailableError struct { - ExperimentName string -} - -func (e UnavailableError) Error() string { - return fmt.Sprintf("no current experiment is named %q", e.ExperimentName) -} - -// ConcludedError is the error type returned by GetCurrent when the requested -// experiment is recognized as concluded. 
-type ConcludedError struct { - ExperimentName string - Message string -} - -func (e ConcludedError) Error() string { - return fmt.Sprintf("experiment %q has concluded: %s", e.ExperimentName, e.Message) -} diff --git a/vendor/github.com/hashicorp/terraform/experiments/experiment.go b/vendor/github.com/hashicorp/terraform/experiments/experiment.go deleted file mode 100644 index 037b34e7..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/experiment.go +++ /dev/null @@ -1,93 +0,0 @@ -package experiments - -// Experiment represents a particular experiment, which can be activated -// independently of all other experiments. -type Experiment string - -// All active and defunct experiments must be represented by constants whose -// internal string values are unique. -// -// Each of these declared constants must also be registered as either a -// current or a defunct experiment in the init() function below. -// -// Each experiment is represented by a string that must be a valid HCL -// identifier so that it can be specified in configuration. -const ( - VariableValidation = Experiment("variable_validation") -) - -func init() { - // Each experiment constant defined above must be registered here as either - // a current or a concluded experiment. - registerCurrentExperiment(VariableValidation) -} - -// GetCurrent takes an experiment name and returns the experiment value -// representing that expression if and only if it is a current experiment. -// -// If the selected experiment is concluded, GetCurrent will return an -// error of type ConcludedError whose message hopefully includes some guidance -// for users of the experiment on how to migrate to a stable feature that -// succeeded it. -// -// If the selected experiment is not known at all, GetCurrent will return an -// error of type UnavailableError. -func GetCurrent(name string) (Experiment, error) { - exp := Experiment(name) - if currentExperiments.Has(exp) { - return exp, nil - } - - if msg, concluded := concludedExperiments[exp]; concluded { - return Experiment(""), ConcludedError{ExperimentName: name, Message: msg} - } - - return Experiment(""), UnavailableError{ExperimentName: name} -} - -// Keyword returns the keyword that would be used to activate this experiment -// in the configuration. -func (e Experiment) Keyword() string { - return string(e) -} - -// IsCurrent returns true if the receiver is considered a currently-selectable -// experiment. -func (e Experiment) IsCurrent() bool { - return currentExperiments.Has(e) -} - -// IsConcluded returns true if the receiver is a concluded experiment. -func (e Experiment) IsConcluded() bool { - _, exists := concludedExperiments[e] - return exists -} - -// currentExperiments are those which are available to activate in the current -// version of Terraform. -// -// Members of this set are registered in the init function above. -var currentExperiments = make(Set) - -// concludedExperiments are those which were available to activate in an earlier -// version of Terraform but are no longer available, either because the feature -// in question has been implemented or because the experiment failed and the -// feature was abandoned. Each experiment maps to a message describing the -// outcome, so we can give users feedback about what they might do in modules -// using concluded experiments. 
-// -// After an experiment has been concluded for a whole major release span it can -// be removed, since we expect users to perform upgrades one major release at -// at time without skipping and thus they will see the concludedness error -// message as they upgrade through a prior major version. -// -// Members of this map are registered in the init function above. -var concludedExperiments = make(map[Experiment]string) - -func registerCurrentExperiment(exp Experiment) { - currentExperiments.Add(exp) -} - -func registerConcludedExperiment(exp Experiment, message string) { - concludedExperiments[exp] = message -} diff --git a/vendor/github.com/hashicorp/terraform/experiments/set.go b/vendor/github.com/hashicorp/terraform/experiments/set.go deleted file mode 100644 index 8247e212..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/set.go +++ /dev/null @@ -1,46 +0,0 @@ -package experiments - -// Set is a collection of experiments where every experiment is either a member -// or not. -type Set map[Experiment]struct{} - -// NewSet constructs a new Set with the given experiments as its initial members. -func NewSet(exps ...Experiment) Set { - ret := make(Set) - for _, exp := range exps { - ret.Add(exp) - } - return ret -} - -// SetUnion constructs a new Set containing the members of all of the given -// sets. -func SetUnion(sets ...Set) Set { - ret := make(Set) - for _, set := range sets { - for exp := range set { - ret.Add(exp) - } - } - return ret -} - -// Add inserts the given experiment into the set. -// -// If the given experiment is already present then this is a no-op. -func (s Set) Add(exp Experiment) { - s[exp] = struct{}{} -} - -// Remove takes the given experiment out of the set. -// -// If the given experiment not already present then this is a no-op. -func (s Set) Remove(exp Experiment) { - delete(s, exp) -} - -// Has tests whether the given experiment is in the receiving set. -func (s Set) Has(exp Experiment) bool { - _, ok := s[exp] - return ok -} diff --git a/vendor/github.com/hashicorp/terraform/experiments/testing.go b/vendor/github.com/hashicorp/terraform/experiments/testing.go deleted file mode 100644 index 54ff2dfd..00000000 --- a/vendor/github.com/hashicorp/terraform/experiments/testing.go +++ /dev/null @@ -1,33 +0,0 @@ -package experiments - -import ( - "testing" -) - -// OverrideForTesting temporarily overrides the global tables -// of experiments in order to allow for a predictable set when unit testing -// the experiments infrastructure code. -// -// The correct way to use this function is to defer a call to its result so -// that the original tables can be restored at the conclusion of the calling -// test: -// -// defer experiments.OverrideForTesting(t, current, concluded)() -// -// This function modifies global variables that are normally fixed throughout -// our execution, so this function must not be called from non-test code and -// any test using it cannot safely run concurrently with other tests. -func OverrideForTesting(t *testing.T, current Set, concluded map[Experiment]string) func() { - // We're not currently using the given *testing.T in here, but we're - // requiring it anyway in case we might need it in future, and because - // it hopefully reinforces that only test code should be calling this. 
- - realCurrents := currentExperiments - realConcludeds := concludedExperiments - currentExperiments = current - concludedExperiments = concluded - return func() { - currentExperiments = realCurrents - concludedExperiments = realConcludeds - } -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go deleted file mode 100644 index b9d15461..00000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. This is the reverse of the Flatten operation. -func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hcl2shim.UnknownVariableValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. 
- keySet := map[int]bool{} - computed := map[string]bool{} - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - - key := k[len(prefix)+1:] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - - // skip the count value - if key == "#" { - continue - } - - // strip the computed flag if there is one - if strings.HasPrefix(key, "~") { - key = key[1:] - computed[key] = true - } - - k, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - keySet[int(k)] = true - } - - keysList := make([]int, 0, num) - for key := range keySet { - keysList = append(keysList, key) - } - sort.Ints(keysList) - - result := make([]interface{}, len(keysList)) - for i, key := range keysList { - keyString := strconv.Itoa(key) - if computed[keyString] { - keyString = "~" + keyString - } - result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) - } - - return result -} - -func expandMap(m map[string]string, prefix string) map[string]interface{} { - // Submaps may not have a '%' key, so we can't count on this value being - // here. If we don't have a count, just proceed as if we have have a map. - if count, ok := m[prefix+"%"]; ok && count == "0" { - return map[string]interface{}{} - } - - result := make(map[string]interface{}) - for k := range m { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - - // skip the map count value - if key == "%" { - continue - } - - result[key] = Expand(m, k[:len(prefix)+len(key)]) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go deleted file mode 100644 index 9ff6e426..00000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go +++ /dev/null @@ -1,71 +0,0 @@ -package flatmap - -import ( - "fmt" - "reflect" -) - -// Flatten takes a structure and turns into a flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. -// -// See the tests for examples of what inputs are turned into. -func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." 
- - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go deleted file mode 100644 index 46b72c40..00000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. -type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix. -func (m Map) Delete(prefix string) { - for k, _ := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k, _ := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k, _ := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. 
-func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go deleted file mode 100644 index f470c9b4..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/config/decode.go +++ /dev/null @@ -1,28 +0,0 @@ -package config - -import ( - "github.com/mitchellh/mapstructure" -) - -func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { - var md mapstructure.Metadata - decoderConfig := &mapstructure.DecoderConfig{ - Metadata: &md, - Result: target, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decoderConfig) - if err != nil { - return nil, err - } - - for _, raw := range raws { - err := decoder.Decode(raw) - if err != nil { - return nil, err - } - } - - return &md, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go deleted file mode 100644 index 1a6e023b..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/config/validator.go +++ /dev/null @@ -1,214 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/terraform" -) - -// Validator is a helper that helps you validate the configuration -// of your resource, resource provider, etc. -// -// At the most basic level, set the Required and Optional lists to be -// specifiers of keys that are required or optional. If a key shows up -// that isn't in one of these two lists, then an error is generated. -// -// The "specifiers" allowed in this is a fairly rich syntax to help -// describe the format of your configuration: -// -// * Basic keys are just strings. For example: "foo" will match the -// "foo" key. -// -// * Nested structure keys can be matched by doing -// "listener.*.foo". This will verify that there is at least one -// listener element that has the "foo" key set. -// -// * The existence of a nested structure can be checked by simply -// doing "listener.*" which will verify that there is at least -// one element in the "listener" structure. This is NOT -// validating that "listener" is an array. It is validating -// that it is a nested structure in the configuration. -// -type Validator struct { - Required []string - Optional []string -} - -func (v *Validator) Validate( - c *terraform.ResourceConfig) (ws []string, es []error) { - // Flatten the configuration so it is easier to reason about - flat := flatmap.Flatten(c.Raw) - - keySet := make(map[string]validatorKey) - for i, vs := range [][]string{v.Required, v.Optional} { - req := i == 0 - for _, k := range vs { - vk, err := newValidatorKey(k, req) - if err != nil { - es = append(es, err) - continue - } - - keySet[k] = vk - } - } - - purged := make([]string, 0) - for _, kv := range keySet { - p, w, e := kv.Validate(flat) - if len(w) > 0 { - ws = append(ws, w...) - } - if len(e) > 0 { - es = append(es, e...) - } - - purged = append(purged, p...) - } - - // Delete all the keys we processed in order to find - // the unknown keys. 
- for _, p := range purged { - delete(flat, p) - } - - // The rest are unknown - for k, _ := range flat { - es = append(es, fmt.Errorf("Unknown configuration: %s", k)) - } - - return -} - -type validatorKey interface { - // Validate validates the given configuration and returns viewed keys, - // warnings, and errors. - Validate(map[string]string) ([]string, []string, []error) -} - -func newValidatorKey(k string, req bool) (validatorKey, error) { - var result validatorKey - - parts := strings.Split(k, ".") - if len(parts) > 1 && parts[1] == "*" { - result = &nestedValidatorKey{ - Parts: parts, - Required: req, - } - } else { - result = &basicValidatorKey{ - Key: k, - Required: req, - } - } - - return result, nil -} - -// basicValidatorKey validates keys that are basic such as "foo" -type basicValidatorKey struct { - Key string - Required bool -} - -func (v *basicValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - for k, _ := range m { - // If we have the exact key its a match - if k == v.Key { - return []string{k}, nil, nil - } - } - - if !v.Required { - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", v.Key)} -} - -type nestedValidatorKey struct { - Parts []string - Required bool -} - -func (v *nestedValidatorKey) validate( - m map[string]string, - prefix string, - offset int) ([]string, []string, []error) { - if offset >= len(v.Parts) { - // We're at the end. Look for a specific key. - v2 := &basicValidatorKey{Key: prefix, Required: v.Required} - return v2.Validate(m) - } - - current := v.Parts[offset] - - // If we're at offset 0, special case to start at the next one. - if offset == 0 { - return v.validate(m, current, offset+1) - } - - // Determine if we're doing a "for all" or a specific key - if current != "*" { - // We're looking at a specific key, continue on. - return v.validate(m, prefix+"."+current, offset+1) - } - - // We're doing a "for all", so we loop over. - countStr, ok := m[prefix+".#"] - if !ok { - if !v.Required { - // It wasn't required, so its no problem. - return nil, nil, nil - } - - return nil, nil, []error{fmt.Errorf( - "Key not found: %s", prefix)} - } - - count, err := strconv.ParseInt(countStr, 0, 0) - if err != nil { - // This shouldn't happen if flatmap works properly - panic("invalid flatmap array") - } - - var e []error - var w []string - u := make([]string, 1, count+1) - u[0] = prefix + ".#" - for i := 0; i < int(count); i++ { - prefix := fmt.Sprintf("%s.%d", prefix, i) - - // Mark that we saw this specific key - u = append(u, prefix) - - // Mark all prefixes of this - for k, _ := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - u = append(u, k) - } - - // If we have more parts, then validate deeper - if offset+1 < len(v.Parts) { - u2, w2, e2 := v.validate(m, prefix, offset+1) - - u = append(u, u2...) - w = append(w, w2...) - e = append(e, e2...) 
- } - } - - return u, w, e -} - -func (v *nestedValidatorKey) Validate( - m map[string]string) ([]string, []string, []error) { - return v.validate(m, "", 0) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go deleted file mode 100644 index 54899bc6..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go +++ /dev/null @@ -1,24 +0,0 @@ -package didyoumean - -import ( - "github.com/agext/levenshtein" -) - -// NameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func NameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go deleted file mode 100644 index 67be1df1..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go +++ /dev/null @@ -1,41 +0,0 @@ -package hilmapstructure - -import ( - "fmt" - "reflect" - - "github.com/mitchellh/mapstructure" -) - -var hilMapstructureDecodeHookEmptySlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookEmptyMap map[string]interface{} - -// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a -// DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists. 
-func WeakDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { - sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice) - stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) - mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap) - - if (source == sliceType || source == stringSliceType) && target == mapType { - return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}") - } - - return val, nil - }, - WeaklyTypedInput: true, - Result: rawVal, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/indent.go b/vendor/github.com/hashicorp/terraform/helper/logging/indent.go deleted file mode 100644 index e0da0d7c..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/logging/indent.go +++ /dev/null @@ -1,23 +0,0 @@ -package logging - -import ( - "strings" -) - -// Indent adds two spaces to the beginning of each line of the given string, -// with the goal of making the log level filter understand it as a line -// continuation rather than possibly as new log lines. -func Indent(s string) string { - var b strings.Builder - for len(s) > 0 { - end := strings.IndexByte(s, '\n') - if end == -1 { - end = len(s) - 1 - } - var l string - l, s = s[:end+1], s[end+1:] - b.WriteString(" ") - b.WriteString(l) - } - return b.String() -} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/level.go b/vendor/github.com/hashicorp/terraform/helper/logging/level.go deleted file mode 100644 index 0dc4dfe8..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/logging/level.go +++ /dev/null @@ -1,159 +0,0 @@ -package logging - -import ( - "bytes" - "io" - "sync" -) - -// LogLevel is a special string, conventionally written all in uppercase, that -// can be used to mark a log line for filtering and to specify filtering -// levels in the LevelFilter type. -type LogLevel string - -// LevelFilter is an io.Writer that can be used with a logger that -// will attempt to filter out log messages that aren't at least a certain -// level. -// -// This filtering is HEURISTIC-BASED, and so will not be 100% reliable. The -// assumptions it makes are: -// -// - Individual log messages are never split across multiple calls to the -// Write method. -// -// - Messages that carry levels are marked by a sequence starting with "[", -// then the level name string, and then "]". Any message without a sequence -// like this is an un-levelled message, and is not subject to filtering. -// -// - Each \n-delimited line in a write is a separate log message, unless a -// line starts with at least one space in which case it is interpreted -// as a continuation of the previous line. -// -// - If a log line starts with a non-whitespace character that isn't a digit -// then it's recognized as a degenerate continuation, because "real" log -// lines should start with a date/time and thus always have a leading -// digit. (This also cleans up after some situations where the assumptuion -// that messages arrive atomically aren't met, which is sadly sometimes -// true for longer messages that trip over some buffering behavior in -// panicwrap.) 
-// -// Because logging is a cross-cutting concern and not fully under the control -// of Terraform itself, there will certainly be cases where the above -// heuristics will fail. For example, it is likely that LevelFilter will -// occasionally misinterpret a continuation line as a new message because the -// code generating it doesn't know about our indentation convention. -// -// Our goal here is just to make a best effort to reduce the log volume, -// accepting that the results will not be 100% correct. -// -// Logging calls within Terraform Core should follow the above conventions so -// that the log output is broadly correct, however. -// -// Once the filter is in use somewhere, it is not safe to modify -// the structure. -type LevelFilter struct { - // Levels is the list of log levels, in increasing order of - // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. - Levels []LogLevel - - // MinLevel is the minimum level allowed through - MinLevel LogLevel - - // The underlying io.Writer where log messages that pass the filter - // will be set. - Writer io.Writer - - badLevels map[LogLevel]struct{} - show bool - once sync.Once -} - -// Check will check a given line if it would be included in the level -// filter. -func (f *LevelFilter) Check(line []byte) bool { - f.once.Do(f.init) - - // Check for a log level - var level LogLevel - x := bytes.IndexByte(line, '[') - if x >= 0 { - y := bytes.IndexByte(line[x:], ']') - if y >= 0 { - level = LogLevel(line[x+1 : x+y]) - } - } - - //return level == "" - - _, ok := f.badLevels[level] - return !ok -} - -// Write is a specialized implementation of io.Writer suitable for being -// the output of a logger from the "log" package. -// -// This Writer implementation assumes that it will only recieve byte slices -// containing one or more entire lines of log output, each one terminated by -// a newline. This is compatible with the behavior of the "log" package -// directly, and is also tolerant of intermediaries that might buffer multiple -// separate writes together, as long as no individual log line is ever -// split into multiple slices. -// -// Behavior is undefined if any log line is split across multiple writes or -// written without a trailing '\n' delimiter. -func (f *LevelFilter) Write(p []byte) (n int, err error) { - for len(p) > 0 { - // Split at the first \n, inclusive - idx := bytes.IndexByte(p, '\n') - if idx == -1 { - // Invalid, undelimited write. We'll tolerate it assuming that - // our assumptions are being violated, but the results may be - // non-ideal. - idx = len(p) - 1 - break - } - var l []byte - l, p = p[:idx+1], p[idx+1:] - // Lines starting with characters other than decimal digits (including - // whitespace) are assumed to be continuations lines. This is an - // imprecise heuristic, but experimentally it seems to generate - // "good enough" results from Terraform Core's own logging. Its mileage - // may vary with output from other systems. - if l[0] >= '0' && l[0] <= '9' { - f.show = f.Check(l) - } - if f.show { - _, err = f.Writer.Write(l) - if err != nil { - // Technically it's not correct to say we've written the whole - // buffer, but for our purposes here it's good enough as we're - // only implementing io.Writer enough to satisfy logging - // use-cases. - return len(p), err - } - } - } - - // We always behave as if we wrote the whole of the buffer, even if - // we actually skipped some lines. We're only implementiong io.Writer - // enough to satisfy logging use-cases. 
- return len(p), nil -} - -// SetMinLevel is used to update the minimum log level -func (f *LevelFilter) SetMinLevel(min LogLevel) { - f.MinLevel = min - f.init() -} - -func (f *LevelFilter) init() { - badLevels := make(map[LogLevel]struct{}) - for _, level := range f.Levels { - if level == f.MinLevel { - break - } - badLevels[level] = struct{}{} - } - f.badLevels = badLevels - f.show = true -} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go deleted file mode 100644 index 75627cf0..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go +++ /dev/null @@ -1,109 +0,0 @@ -package logging - -import ( - "io" - "io/ioutil" - "log" - "os" - "strings" - "syscall" -) - -// These are the environmental variables that determine if we log, and if -// we log whether or not the log should go to a file. -const ( - EnvLog = "TF_LOG" // Set to True - EnvLogFile = "TF_LOG_PATH" // Set to a file -) - -// ValidLevels are the log level names that Terraform recognizes. -var ValidLevels = []LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} - -// LogOutput determines where we should send logs (if anywhere) and the log level. -func LogOutput() (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := CurrentLogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - if logPath := os.Getenv(EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - if logLevel == "TRACE" { - // Just pass through logs directly then, without any level filtering at all. - return logOutput, nil - } - - // Otherwise we'll use our level filter, which is a heuristic-based - // best effort thing that is not totally reliable but helps to reduce - // the volume of logs in some cases. - logOutput = &LevelFilter{ - Levels: ValidLevels, - MinLevel: LogLevel(logLevel), - Writer: logOutput, - } - - return logOutput, nil -} - -// SetOutput checks for a log destination with LogOutput, and calls -// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses -// ioutil.Discard. Any error from LogOutout is fatal. -func SetOutput() { - out, err := LogOutput() - if err != nil { - log.Fatal(err) - } - - if out == nil { - out = ioutil.Discard - } - - log.SetOutput(out) -} - -// CurrentLogLevel returns the current log level string based the environment vars -func CurrentLogLevel() string { - envLevel := os.Getenv(EnvLog) - if envLevel == "" { - return "" - } - - logLevel := "TRACE" - if isValidLogLevel(envLevel) { - // allow following for better ux: info, Info or INFO - logLevel = strings.ToUpper(envLevel) - } else { - log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. 
Valid levels are: %+v", - envLevel, ValidLevels) - } - if logLevel != "TRACE" { - log.Printf("[WARN] Log levels other than TRACE are currently unreliable, and are supported only for backward compatibility.\n Use TF_LOG=TRACE to see Terraform's internal logs.\n ----") - } - - return logLevel -} - -// IsDebugOrHigher returns whether or not the current log level is debug or trace -func IsDebugOrHigher() bool { - level := string(CurrentLogLevel()) - return level == "DEBUG" || level == "TRACE" -} - -func isValidLogLevel(level string) bool { - for _, l := range ValidLevels { - if strings.ToUpper(level) == string(l) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go deleted file mode 100644 index 82b5937b..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package plugin contains types and functions to help Terraform plugins -// implement the plugin rpc interface. -// The primary Provider type will be responsible for converting from the grpc -// wire protocol to the types and methods known to the provider -// implementations. -package plugin diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go deleted file mode 100644 index f32610c6..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go +++ /dev/null @@ -1,1398 +0,0 @@ -package plugin - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - - "github.com/zclconf/go-cty/cty" - ctyconvert "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/msgpack" - context "golang.org/x/net/context" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/terraform" -) - -const newExtraKey = "_new_extra_shim" - -// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a -// proto.ProviderServer implementation. If the provided provider is not a -// *schema.Provider, this will return nil, -func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer { - sp, ok := p.(*schema.Provider) - if !ok { - return nil - } - - return &GRPCProviderServer{ - provider: sp, - } -} - -// GRPCProviderServer handles the server, or plugin side of the rpc connection. 
-type GRPCProviderServer struct { - provider *schema.Provider -} - -func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { - // Here we are certain that the provider is being called through grpc, so - // make sure the feature flag for helper/schema is set - schema.SetProto5() - - resp := &proto.GetProviderSchema_Response{ - ResourceSchemas: make(map[string]*proto.Schema), - DataSourceSchemas: make(map[string]*proto.Schema), - } - - resp.Provider = &proto.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), - } - - for typ, res := range s.provider.ResourcesMap { - resp.ResourceSchemas[typ] = &proto.Schema{ - Version: int64(res.SchemaVersion), - Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), - } - } - - for typ, dat := range s.provider.DataSourcesMap { - resp.DataSourceSchemas[typ] = &proto.Schema{ - Version: int64(dat.SchemaVersion), - Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), - } - } - - return resp, nil -} - -func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { - return schema.InternalMap(s.provider.Schema).CoreConfigSchema() -} - -func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { - res := s.provider.ResourcesMap[name] - return res.CoreConfigSchema() -} - -func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { - dat := s.provider.DataSourcesMap[name] - return dat.CoreConfigSchema() -} - -func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { - resp := &proto.PrepareProviderConfig_Response{} - - schemaBlock := s.getProviderSchemaBlock() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. 
- configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := s.provider.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return resp, nil - } - - configVal, err = schemaBlock.CoerceValue(configVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. 
- if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.Validate(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} - - return resp, nil -} - -func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { - resp := &proto.ValidateResourceTypeConfig_Response{} - - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.ValidateResource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { - resp := &proto.ValidateDataSourceConfig_Response{} - - schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.ValidateDataSource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { - resp := &proto.UpgradeResourceState_Response{} - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - version := int(req.Version) - - jsonMap := map[string]interface{}{} - var err error - - switch { - // We first need to upgrade a flatmap state if it exists. - // There should never be both a JSON and Flatmap state in the request. - case len(req.RawState.Flatmap) > 0: - jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - // if there's a JSON state, we need to decode it. 
- case len(req.RawState.Json) > 0: - err = json.Unmarshal(req.RawState.Json, &jsonMap) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - default: - log.Println("[DEBUG] no state provided to upgrade") - return resp, nil - } - - // complete the upgrade of the JSON states - jsonMap, err = s.upgradeJSONState(version, jsonMap, res) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // The provider isn't required to clean out removed fields - s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) - - // now we need to turn the state into the default json representation, so - // that it can be re-decoded using the actual schema. - val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Now we need to make sure blocks are represented correctly, which means - // that missing blocks are empty collections, rather than null. - // First we need to CoerceValue to ensure that all object types match. - val, err = schemaBlock.CoerceValue(val) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - // Normalize the value and fill in any missing blocks. - val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) - - // encode the final state to the expected msgpack format - newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP} - return resp, nil -} - -// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate -// state if necessary, and converts it to the new JSON state format decoded as a -// map[string]interface{}. -// upgradeFlatmapState returns the json map along with the corresponding schema -// version. -func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { - // this will be the version we've upgraded so, defaulting to the given - // version in case no migration was called. - upgradedVersion := version - - // first determine if we need to call the legacy MigrateState func - requiresMigrate := version < res.SchemaVersion - - schemaType := res.CoreConfigSchema().ImpliedType() - - // if there are any StateUpgraders, then we need to only compare - // against the first version there - if len(res.StateUpgraders) > 0 { - requiresMigrate = version < res.StateUpgraders[0].Version - } - - if requiresMigrate && res.MigrateState == nil { - // Providers were previously allowed to bump the version - // without declaring MigrateState. - // If there are further upgraders, then we've only updated that far. 
- if len(res.StateUpgraders) > 0 { - schemaType = res.StateUpgraders[0].Type - upgradedVersion = res.StateUpgraders[0].Version - } - } else if requiresMigrate { - is := &terraform.InstanceState{ - ID: m["id"], - Attributes: m, - Meta: map[string]interface{}{ - "schema_version": strconv.Itoa(version), - }, - } - - is, err := res.MigrateState(version, is, s.provider.Meta()) - if err != nil { - return nil, 0, err - } - - // re-assign the map in case there was a copy made, making sure to keep - // the ID - m := is.Attributes - m["id"] = is.ID - - // if there are further upgraders, then we've only updated that far - if len(res.StateUpgraders) > 0 { - schemaType = res.StateUpgraders[0].Type - upgradedVersion = res.StateUpgraders[0].Version - } - } else { - // the schema version may be newer than the MigrateState functions - // handled and older than the current, but still stored in the flatmap - // form. If that's the case, we need to find the correct schema type to - // convert the state. - for _, upgrader := range res.StateUpgraders { - if upgrader.Version == version { - schemaType = upgrader.Type - break - } - } - } - - // now we know the state is up to the latest version that handled the - // flatmap format state. Now we can upgrade the format and continue from - // there. - newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) - if err != nil { - return nil, 0, err - } - - jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType) - return jsonMap, upgradedVersion, err -} - -func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { - var err error - - for _, upgrader := range res.StateUpgraders { - if version != upgrader.Version { - continue - } - - m, err = upgrader.Upgrade(m, s.provider.Meta()) - if err != nil { - return nil, err - } - version++ - } - - return m, nil -} - -// Remove any attributes no longer present in the schema, so that the json can -// be correctly decoded. -func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { - // we're only concerned with finding maps that corespond to object - // attributes - switch v := v.(type) { - case []interface{}: - // If these aren't blocks the next call will be a noop - if ty.IsListType() || ty.IsSetType() { - eTy := ty.ElementType() - for _, eV := range v { - s.removeAttributes(eV, eTy) - } - } - return - case map[string]interface{}: - // map blocks aren't yet supported, but handle this just in case - if ty.IsMapType() { - eTy := ty.ElementType() - for _, eV := range v { - s.removeAttributes(eV, eTy) - } - return - } - - if ty == cty.DynamicPseudoType { - log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) - return - } - - if !ty.IsObjectType() { - // This shouldn't happen, and will fail to decode further on, so - // there's no need to handle it here. 
- log.Printf("[WARN] unexpected type %#v for map in json state", ty) - return - } - - attrTypes := ty.AttributeTypes() - for attr, attrV := range v { - attrTy, ok := attrTypes[attr] - if !ok { - log.Printf("[DEBUG] attribute %q no longer present in schema", attr) - delete(v, attr) - continue - } - - s.removeAttributes(attrV, attrTy) - } - } -} - -func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { - resp := &proto.Stop_Response{} - - err := s.provider.Stop() - if err != nil { - resp.Error = err.Error() - } - - return resp, nil -} - -func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { - resp := &proto.Configure_Response{} - - schemaBlock := s.getProviderSchemaBlock() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - s.provider.TerraformVersion = req.TerraformVersion - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - err = s.provider.Configure(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - - return resp, nil -} - -func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { - resp := &proto.ReadResource_Response{ - // helper/schema did previously handle private data during refresh, but - // core is now going to expect this to be maintained in order to - // persist it in the state. - Private: req.Private, - } - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - instanceState, err := res.ShimInstanceStateFromValue(stateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - private := make(map[string]interface{}) - if len(req.Private) > 0 { - if err := json.Unmarshal(req.Private, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - instanceState.Meta = private - - newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - if newInstanceState == nil || newInstanceState.ID == "" { - // The old provider API used an empty id to signal that the remote - // object appears to have been deleted, but our new protocol expects - // to see a null value (in the cty sense) in that case. 
- newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil - } - - // helper/schema should always copy the ID over, but do it again just to be safe - newInstanceState.Attributes["id"] = newInstanceState.ID - - newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = normalizeNullValues(newStateVal, stateVal, false) - newStateVal = copyTimeoutValues(newStateVal, stateVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - - return resp, nil -} - -func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { - resp := &proto.PlanResourceChange_Response{} - - // This is a signal to Terraform Core that we're doing the best we can to - // shim the legacy type system of the SDK onto the Terraform type system - // but we need it to cut us some slack. This setting should not be taken - // forward to any new SDK implementations, since setting it prevents us - // from catching certain classes of provider bug that can lead to - // confusing downstream errors. - resp.LegacyTypeSystem = true - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - create := priorStateVal.IsNull() - - proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // We don't usually plan destroys, but this can return early in any case. - if proposedNewStateVal.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - priorPrivate := make(map[string]interface{}) - if len(req.PriorPrivate) > 0 { - if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - priorState.Meta = priorPrivate - - // Ensure there are no nulls that will cause helper/schema to panic. 
- if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // turn the proposed state into a legacy configuration - cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) - - diff, err := s.provider.SimpleDiff(info, priorState, cfg) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // if this is a new instance, we need to make sure ID is going to be computed - if create { - if diff == nil { - diff = terraform.NewInstanceDiff() - } - - diff.Attributes["id"] = &terraform.ResourceAttrDiff{ - NewComputed: true, - } - } - - if diff == nil || len(diff.Attributes) == 0 { - // schema.Provider.Diff returns nil if it ends up making a diff with no - // changes, but our new interface wants us to return an actual change - // description that _shows_ there are no changes. This is always the - // prior state, because we force a diff above if this is a new instance. - resp.PlannedState = req.PriorState - resp.PlannedPrivate = req.PriorPrivate - return resp, nil - } - - if priorState == nil { - priorState = &terraform.InstanceState{} - } - - // now we need to apply the diff to the prior state, so get the planned state - plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) - - plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) - - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) - - // The old SDK code has some imprecisions that cause it to sometimes - // generate differences that the SDK itself does not consider significant - // but Terraform Core would. To avoid producing weird do-nothing diffs - // in that case, we'll check if the provider as produced something we - // think is "equivalent" to the prior state and just return the prior state - // itself if so, thus ensuring that Terraform Core will treat this as - // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its - // accuracy. 
- forceNoChanges := false - if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { - plannedStateVal = priorStateVal - forceNoChanges = true - } - - // if this was creating the resource, we need to set any remaining computed - // fields - if create { - plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) - } - - plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.PlannedState = &proto.DynamicValue{ - Msgpack: plannedMP, - } - - // encode any timeouts into the diff Meta - t := &schema.ResourceTimeout{} - if err := t.ConfigDecode(res, cfg); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - if err := t.DiffEncode(diff); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Now we need to store any NewExtra values, which are where any actual - // StateFunc modified config fields are hidden. - privateMap := diff.Meta - if privateMap == nil { - privateMap = map[string]interface{}{} - } - - newExtra := map[string]interface{}{} - - for k, v := range diff.Attributes { - if v.NewExtra != nil { - newExtra[k] = v.NewExtra - } - } - privateMap[newExtraKey] = newExtra - - // the Meta field gets encoded into PlannedPrivate - plannedPrivate, err := json.Marshal(privateMap) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.PlannedPrivate = plannedPrivate - - // collect the attributes that require instance replacement, and convert - // them to cty.Paths. - var requiresNew []string - if !forceNoChanges { - for attr, d := range diff.Attributes { - if d.RequiresNew { - requiresNew = append(requiresNew, attr) - } - } - } - - // If anything requires a new resource already, or the "id" field indicates - // that we will be creating a new resource, then we need to add that to - // RequiresReplace so that core can tell if the instance is being replaced - // even if changes are being suppressed via "ignore_changes". 
- id := plannedStateVal.GetAttr("id") - if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { - requiresNew = append(requiresNew, "id") - } - - requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // convert these to the protocol structures - for _, p := range requiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) - } - - return resp, nil -} - -func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { - resp := &proto.ApplyResourceChange_Response{ - // Start with the existing state as a fallback - NewState: req.PriorState, - } - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - private := make(map[string]interface{}) - if len(req.PlannedPrivate) > 0 { - if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - var diff *terraform.InstanceDiff - destroy := false - - // a null state means we are destroying the instance - if plannedStateVal.IsNull() { - destroy = true - diff = &terraform.InstanceDiff{ - Attributes: make(map[string]*terraform.ResourceAttrDiff), - Meta: make(map[string]interface{}), - Destroy: true, - } - } else { - diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res)) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - if diff == nil { - diff = &terraform.InstanceDiff{ - Attributes: make(map[string]*terraform.ResourceAttrDiff), - Meta: make(map[string]interface{}), - } - } - - // add NewExtra Fields that may have been stored in the private data - if newExtra := private[newExtraKey]; newExtra != nil { - for k, v := range newExtra.(map[string]interface{}) { - d := diff.Attributes[k] - - if d == nil { - d = &terraform.ResourceAttrDiff{} - } - - d.NewExtra = v - diff.Attributes[k] = d - } - } - - if private != nil { - diff.Meta = private - } - - for k, d := range diff.Attributes { - // We need to turn off any RequiresNew. There could be attributes - // without changes in here inserted by helper/schema, but if they have - // RequiresNew then the state will be dropped from the ResourceData. - d.RequiresNew = false - - // Check that any "removed" attributes that don't actually exist in the - // prior state, or helper/schema will confuse itself - if d.NewRemoved { - if _, ok := priorState.Attributes[k]; !ok { - delete(diff.Attributes, k) - } - } - } - - newInstanceState, err := s.provider.Apply(info, priorState, diff) - // we record the error here, but continue processing any returned state. 
- if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - } - newStateVal := cty.NullVal(schemaBlock.ImpliedType()) - - // Always return a null value for destroy. - // While this is usually indicated by a nil state, check for missing ID or - // attributes in the case of a provider failure. - if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil - } - - // We keep the null val if we destroyed the resource, otherwise build the - // entire object, even if the new state was nil. - newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) - - newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - - meta, err := json.Marshal(newInstanceState.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.Private = meta - - // This is a signal to Terraform Core that we're doing the best we can to - // shim the legacy type system of the SDK onto the Terraform type system - // but we need it to cut us some slack. This setting should not be taken - // forward to any new SDK implementations, since setting it prevents us - // from catching certain classes of provider bug that can lead to - // confusing downstream errors. - resp.LegacyTypeSystem = true - - return resp, nil -} - -func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { - resp := &proto.ImportResourceState_Response{} - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - newInstanceStates, err := s.provider.ImportState(info, req.Id) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - for _, is := range newInstanceStates { - // copy the ID again just to be sure it wasn't missed - is.Attributes["id"] = is.ID - - resourceType := is.Ephemeral.Type - if resourceType == "" { - resourceType = req.TypeName - } - - schemaBlock := s.getResourceSchemaBlock(resourceType) - newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Normalize the value and fill in any missing blocks. 
- newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - meta, err := json.Marshal(is.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - importedResource := &proto.ImportResourceState_ImportedResource{ - TypeName: resourceType, - State: &proto.DynamicValue{ - Msgpack: newStateMP, - }, - Private: meta, - } - - resp.ImportedResources = append(resp.ImportedResources, importedResource) - } - - return resp, nil -} - -func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { - resp := &proto.ReadDataSource_Response{} - - schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - // we need to still build the diff separately with the Read method to match - // the old behavior - diff, err := s.provider.ReadDataDiff(info, config) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // now we can get the new complete data source - newInstanceState, err := s.provider.ReadDataApply(info, diff) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = copyTimeoutValues(newStateVal, configVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.State = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil -} - -func pathToAttributePath(path cty.Path) *proto.AttributePath { - var steps []*proto.AttributePath_Step - - for _, step := range path { - switch s := step.(type) { - case cty.GetAttrStep: - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: s.Name, - }, - }) - case cty.IndexStep: - ty := s.Key.Type() - switch ty { - case cty.Number: - i, _ := s.Key.AsBigFloat().Int64() - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: i, - }, - }) - case cty.String: - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: s.Key.AsString(), - }, - }) - } - } - } - - return &proto.AttributePath{Steps: steps} -} - -// helper/schema throws away timeout values from the config and stores them in -// the Private/Meta fields. 
we need to copy those values into the planned state -// so that core doesn't see a perpetual diff with the timeout block. -func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { - // if `to` is null we are planning to remove it altogether. - if to.IsNull() { - return to - } - toAttrs := to.AsValueMap() - // We need to remove the key since the hcl2shims will add a non-null block - // because we can't determine if a single block was null from the flatmapped - // values. This needs to conform to the correct schema for marshaling, so - // change the value to null rather than deleting it from the object map. - timeouts, ok := toAttrs[schema.TimeoutsConfigKey] - if ok { - toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) - } - - // if from is null then there are no timeouts to copy - if from.IsNull() { - return cty.ObjectVal(toAttrs) - } - - fromAttrs := from.AsValueMap() - timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] - - // timeouts shouldn't be unknown, but don't copy possibly invalid values either - if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { - // no timeouts block to copy - return cty.ObjectVal(toAttrs) - } - - toAttrs[schema.TimeoutsConfigKey] = timeouts - - return cty.ObjectVal(toAttrs) -} - -// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all -// StateFuncs and CustomizeDiffs removed. This will be used during apply to -// create a diff from a planned state where the diff modifications have already -// been applied. -func stripResourceModifiers(r *schema.Resource) *schema.Resource { - if r == nil { - return nil - } - // start with a shallow copy - newResource := new(schema.Resource) - *newResource = *r - - newResource.CustomizeDiff = nil - newResource.Schema = map[string]*schema.Schema{} - - for k, s := range r.Schema { - newResource.Schema[k] = stripSchema(s) - } - - return newResource -} - -func stripSchema(s *schema.Schema) *schema.Schema { - if s == nil { - return nil - } - // start with a shallow copy - newSchema := new(schema.Schema) - *newSchema = *s - - newSchema.StateFunc = nil - - switch e := newSchema.Elem.(type) { - case *schema.Schema: - newSchema.Elem = stripSchema(e) - case *schema.Resource: - newSchema.Elem = stripResourceModifiers(e) - } - - return newSchema -} - -// Zero values and empty containers may be interchanged by the apply process. -// When there is a discrepency between src and dst value being null or empty, -// prefer the src value. This takes a little more liberty with set types, since -// we can't correlate modified set values. In the case of sets, if the src set -// was wholly known we assume the value was correctly applied and copy that -// entirely to the new value. -// While apply prefers the src value, during plan we prefer dst whenever there -// is an unknown or a set is involved, since the plan can alter the value -// however it sees fit. This however means that a CustomizeDiffFunction may not -// be able to change a null to an empty value or vice versa, but that should be -// very uncommon nor was it reliable before 0.12 either. -func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { - ty := dst.Type() - if !src.IsNull() && !src.IsKnown() { - // Return src during plan to retain unknown interpolated placeholders, - // which could be lost if we're only updating a resource. If this is a - // read scenario, then there shouldn't be any unknowns at all. 
- if dst.IsNull() && !apply { - return src - } - return dst - } - - // Handle null/empty changes for collections during apply. - // A change between null and empty values prefers src to make sure the state - // is consistent between plan and apply. - if ty.IsCollectionType() && apply { - dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 - srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 - - if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { - return src - } - } - - // check the invariants that we need below, to ensure we are working with - // non-null and known values. - if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { - return dst - } - - switch { - case ty.IsMapType(), ty.IsObjectType(): - var dstMap map[string]cty.Value - if !dst.IsNull() { - dstMap = dst.AsValueMap() - } - if dstMap == nil { - dstMap = map[string]cty.Value{} - } - - srcMap := src.AsValueMap() - for key, v := range srcMap { - dstVal, ok := dstMap[key] - if !ok && apply && ty.IsMapType() { - // don't transfer old map values to dst during apply - continue - } - - if dstVal == cty.NilVal { - if !apply && ty.IsMapType() { - // let plan shape this map however it wants - continue - } - dstVal = cty.NullVal(v.Type()) - } - - dstMap[key] = normalizeNullValues(dstVal, v, apply) - } - - // you can't call MapVal/ObjectVal with empty maps, but nothing was - // copied in anyway. If the dst is nil, and the src is known, assume the - // src is correct. - if len(dstMap) == 0 { - if dst.IsNull() && src.IsWhollyKnown() && apply { - return src - } - return dst - } - - if ty.IsMapType() { - // helper/schema will populate an optional+computed map with - // unknowns which we have to fixup here. - // It would be preferable to simply prevent any known value from - // becoming unknown, but concessions have to be made to retain the - // broken legacy behavior when possible. - for k, srcVal := range srcMap { - if !srcVal.IsNull() && srcVal.IsKnown() { - dstVal, ok := dstMap[k] - if !ok { - continue - } - - if !dstVal.IsNull() && !dstVal.IsKnown() { - dstMap[k] = srcVal - } - } - } - - return cty.MapVal(dstMap) - } - - return cty.ObjectVal(dstMap) - - case ty.IsSetType(): - // If the original was wholly known, then we expect that is what the - // provider applied. The apply process loses too much information to - // reliably re-create the set. - if src.IsWhollyKnown() && apply { - return src - } - - case ty.IsListType(), ty.IsTupleType(): - // If the dst is null, and the src is known, then we lost an empty value - // so take the original. - if dst.IsNull() { - if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { - return src - } - - // if dst is null and src only contains unknown values, then we lost - // those during a read or plan. - if !apply && !src.IsNull() { - allUnknown := true - for _, v := range src.AsValueSlice() { - if v.IsKnown() { - allUnknown = false - break - } - } - if allUnknown { - return src - } - } - - return dst - } - - // if the lengths are identical, then iterate over each element in succession. 
- srcLen := src.LengthInt() - dstLen := dst.LengthInt() - if srcLen == dstLen && srcLen > 0 { - srcs := src.AsValueSlice() - dsts := dst.AsValueSlice() - - for i := 0; i < srcLen; i++ { - dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) - } - - if ty.IsTupleType() { - return cty.TupleVal(dsts) - } - return cty.ListVal(dsts) - } - - case ty == cty.String: - // The legacy SDK should not be able to remove a value during plan or - // apply, however we are only going to overwrite this if the source was - // an empty string, since that is what is often equated with unset and - // lost in the diff process. - if dst.IsNull() && src.AsString() == "" { - return src - } - } - - return dst -} - -// validateConfigNulls checks a config value for unsupported nulls before -// attempting to shim the value. While null values can mostly be ignored in the -// configuration, since they're not supported in HCL1, the case where a null -// appears in a list-like attribute (list, set, tuple) will present a nil value -// to helper/schema which can panic. Return an error to the user in this case, -// indicating the attribute with the null value. -func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { - var diags []*proto.Diagnostic - if v.IsNull() || !v.IsKnown() { - return diags - } - - switch { - case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - if ev.IsNull() { - // if this is a set, the kv is also going to be null which - // isn't a valid path element, so we can't append it to the - // diagnostic. - p := path - if !kv.IsNull() { - p = append(p, cty.IndexStep{Key: kv}) - } - - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "Null value found in list", - Detail: "Null values are not allowed for this attribute value.", - Attribute: convert.PathToAttributePath(p), - }) - continue - } - - d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) - diags = convert.AppendProtoDiag(diags, d) - } - - case v.Type().IsMapType() || v.Type().IsObjectType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - var step cty.PathStep - switch { - case v.Type().IsMapType(): - step = cty.IndexStep{Key: kv} - case v.Type().IsObjectType(): - step = cty.GetAttrStep{Name: kv.AsString()} - } - d := validateConfigNulls(ev, append(path, step)) - diags = convert.AppendProtoDiag(diags, d) - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go deleted file mode 100644 index 088e94e4..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go +++ /dev/null @@ -1,201 +0,0 @@ -package plugin - -import ( - "log" - "strings" - "unicode/utf8" - - "github.com/hashicorp/terraform/helper/schema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/terraform" - "github.com/zclconf/go-cty/cty" - ctyconvert "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/msgpack" - context "golang.org/x/net/context" -) - -// NewGRPCProvisionerServerShim wraps a terraform.ResourceProvisioner in a -// proto.ProvisionerServer implementation. 
If the provided provisioner is not a -// *schema.Provisioner, this will return nil, -func NewGRPCProvisionerServerShim(p terraform.ResourceProvisioner) *GRPCProvisionerServer { - sp, ok := p.(*schema.Provisioner) - if !ok { - return nil - } - return &GRPCProvisionerServer{ - provisioner: sp, - } -} - -type GRPCProvisionerServer struct { - provisioner *schema.Provisioner -} - -func (s *GRPCProvisionerServer) GetSchema(_ context.Context, req *proto.GetProvisionerSchema_Request) (*proto.GetProvisionerSchema_Response, error) { - resp := &proto.GetProvisionerSchema_Response{} - - resp.Provisioner = &proto.Schema{ - Block: convert.ConfigSchemaToProto(schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()), - } - - return resp, nil -} - -func (s *GRPCProvisionerServer) ValidateProvisionerConfig(_ context.Context, req *proto.ValidateProvisionerConfig_Request) (*proto.ValidateProvisionerConfig_Response, error) { - resp := &proto.ValidateProvisionerConfig_Response{} - - cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, cfgSchema) - - warns, errs := s.provisioner.Validate(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -// stringMapFromValue converts a cty.Value to a map[stirng]string. -// This will panic if the val is not a cty.Map(cty.String). -func stringMapFromValue(val cty.Value) map[string]string { - m := map[string]string{} - if val.IsNull() || !val.IsKnown() { - return m - } - - for it := val.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - - if !av.IsKnown() || av.IsNull() { - continue - } - - av, _ = ctyconvert.Convert(av, cty.String) - m[name] = av.AsString() - } - - return m -} - -// uiOutput implements the terraform.UIOutput interface to adapt the grpc -// stream to the legacy Provisioner.Apply method. -type uiOutput struct { - srv proto.Provisioner_ProvisionResourceServer -} - -func (o uiOutput) Output(s string) { - err := o.srv.Send(&proto.ProvisionResource_Response{ - Output: toValidUTF8(s, string(utf8.RuneError)), - }) - if err != nil { - log.Printf("[ERROR] %s", err) - } -} - -func (s *GRPCProvisionerServer) ProvisionResource(req *proto.ProvisionResource_Request, srv proto.Provisioner_ProvisionResourceServer) error { - // We send back a diagnostics over the stream if there was a - // provisioner-side problem. 
- srvResp := &proto.ProvisionResource_Response{} - - cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() - cfgVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - return nil - } - resourceConfig := terraform.NewResourceConfigShimmed(cfgVal, cfgSchema) - - connVal, err := msgpack.Unmarshal(req.Connection.Msgpack, cty.Map(cty.String)) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - return nil - } - - conn := stringMapFromValue(connVal) - - instanceState := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: conn, - }, - Meta: make(map[string]interface{}), - } - - err = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - } - return nil -} - -func (s *GRPCProvisionerServer) Stop(_ context.Context, req *proto.Stop_Request) (*proto.Stop_Response, error) { - resp := &proto.Stop_Response{} - - err := s.provisioner.Stop() - if err != nil { - resp.Error = err.Error() - } - - return resp, nil -} - -// FIXME: backported from go1.13 strings package, remove once terraform is -// using go >= 1.13 -// ToValidUTF8 returns a copy of the string s with each run of invalid UTF-8 byte sequences -// replaced by the replacement string, which may be empty. -func toValidUTF8(s, replacement string) string { - var b strings.Builder - - for i, c := range s { - if c != utf8.RuneError { - continue - } - - _, wid := utf8.DecodeRuneInString(s[i:]) - if wid == 1 { - b.Grow(len(s) + len(replacement)) - b.WriteString(s[:i]) - s = s[i:] - break - } - } - - // Fast path for unchanged input - if b.Cap() == 0 { // didn't call b.Grow above - return s - } - - invalid := false // previous byte was from an invalid UTF-8 sequence - for i := 0; i < len(s); { - c := s[i] - if c < utf8.RuneSelf { - i++ - invalid = false - b.WriteByte(c) - continue - } - _, wid := utf8.DecodeRuneInString(s[i:]) - if wid == 1 { - i++ - if !invalid { - invalid = true - b.WriteString(replacement) - } - continue - } - invalid = false - b.WriteString(s[i : i+wid]) - i += wid - } - - return b.String() -} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go deleted file mode 100644 index 64a6784e..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go +++ /dev/null @@ -1,131 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// SetUnknowns takes a cty.Value, and compares it to the schema setting any null -// values which are computed to unknown. -func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { - if !val.IsKnown() { - return val - } - - // If the object was null, we still need to handle the top level attributes - // which might be computed, but we don't need to expand the blocks. - if val.IsNull() { - objMap := map[string]cty.Value{} - allNull := true - for name, attr := range schema.Attributes { - switch { - case attr.Computed: - objMap[name] = cty.UnknownVal(attr.Type) - allNull = false - default: - objMap[name] = cty.NullVal(attr.Type) - } - } - - // If this object has no unknown attributes, then we can leave it null. 
- if allNull { - return val - } - - return cty.ObjectVal(objMap) - } - - valMap := val.AsValueMap() - newVals := make(map[string]cty.Value) - - for name, attr := range schema.Attributes { - v := valMap[name] - - if attr.Computed && v.IsNull() { - newVals[name] = cty.UnknownVal(attr.Type) - continue - } - - newVals[name] = v - } - - for name, blockS := range schema.BlockTypes { - blockVal := valMap[name] - if blockVal.IsNull() || !blockVal.IsKnown() { - newVals[name] = blockVal - continue - } - - blockValType := blockVal.Type() - blockElementType := blockS.Block.ImpliedType() - - // This switches on the value type here, so we can correctly switch - // between Tuples/Lists and Maps/Objects. - switch { - case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: - // NestingSingle is the only exception here, where we treat the - // block directly as an object - newVals[name] = SetUnknowns(blockVal, &blockS.Block) - - case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): - listVals := blockVal.AsValueSlice() - newListVals := make([]cty.Value, 0, len(listVals)) - - for _, v := range listVals { - newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) - } - - switch { - case blockValType.IsSetType(): - switch len(newListVals) { - case 0: - newVals[name] = cty.SetValEmpty(blockElementType) - default: - newVals[name] = cty.SetVal(newListVals) - } - case blockValType.IsListType(): - switch len(newListVals) { - case 0: - newVals[name] = cty.ListValEmpty(blockElementType) - default: - newVals[name] = cty.ListVal(newListVals) - } - case blockValType.IsTupleType(): - newVals[name] = cty.TupleVal(newListVals) - } - - case blockValType.IsMapType(), blockValType.IsObjectType(): - mapVals := blockVal.AsValueMap() - newMapVals := make(map[string]cty.Value) - - for k, v := range mapVals { - newMapVals[k] = SetUnknowns(v, &blockS.Block) - } - - switch { - case blockValType.IsMapType(): - switch len(newMapVals) { - case 0: - newVals[name] = cty.MapValEmpty(blockElementType) - default: - newVals[name] = cty.MapVal(newMapVals) - } - case blockValType.IsObjectType(): - if len(newMapVals) == 0 { - // We need to populate empty values to make a valid object. 
- for attr, ty := range blockElementType.AttributeTypes() { - newMapVals[attr] = cty.NullVal(ty) - } - } - newVals[name] = cty.ObjectVal(newMapVals) - } - - default: - panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) - } - } - - return cty.ObjectVal(newVals) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go deleted file mode 100644 index 7ee21614..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/error.go +++ /dev/null @@ -1,79 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "time" -) - -type NotFoundError struct { - LastError error - LastRequest interface{} - LastResponse interface{} - Message string - Retries int -} - -func (e *NotFoundError) Error() string { - if e.Message != "" { - return e.Message - } - - if e.Retries > 0 { - return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) - } - - return "couldn't find resource" -} - -// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending -type UnexpectedStateError struct { - LastError error - State string - ExpectedState []string -} - -func (e *UnexpectedStateError) Error() string { - return fmt.Sprintf( - "unexpected state '%s', wanted target '%s'. last error: %s", - e.State, - strings.Join(e.ExpectedState, ", "), - e.LastError, - ) -} - -// TimeoutError is returned when WaitForState times out -type TimeoutError struct { - LastError error - LastState string - Timeout time.Duration - ExpectedState []string -} - -func (e *TimeoutError) Error() string { - expectedState := "resource to be gone" - if len(e.ExpectedState) > 0 { - expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) - } - - extraInfo := make([]string, 0) - if e.LastState != "" { - extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) - } - if e.Timeout > 0 { - extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) - } - - suffix := "" - if len(extraInfo) > 0 { - suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) - } - - if e.LastError != nil { - return fmt.Sprintf("timeout while waiting for %s%s: %s", - expectedState, suffix, e.LastError) - } - - return fmt.Sprintf("timeout while waiting for %s%s", - expectedState, suffix) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go deleted file mode 100644 index 0742e993..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package resource - -import ( - "context" - "net" - "time" - - "github.com/hashicorp/terraform/helper/plugin" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - tfplugin "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" -) - -// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC -// shim and starts it in a grpc server using an inmem connection. It returns a -// GRPCClient for this new server to test the shimmed resource provider. 
-func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { - listener := bufconn.Listen(256 * 1024) - grpcServer := grpc.NewServer() - - p := plugin.NewGRPCProviderServerShim(rp) - proto.RegisterProviderServer(grpcServer, p) - - go grpcServer.Serve(listener) - - conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { - return listener.Dial() - }), grpc.WithInsecure()) - if err != nil { - panic(err) - } - - var pp tfplugin.GRPCProviderPlugin - client, _ := pp.GRPCClient(context.Background(), nil, conn) - - grpcClient := client.(*tfplugin.GRPCProvider) - grpcClient.TestServer = grpcServer - - return grpcClient -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go deleted file mode 100644 index a465136f..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/map.go +++ /dev/null @@ -1,140 +0,0 @@ -package resource - -import ( - "fmt" - "sort" - - "github.com/hashicorp/terraform/terraform" -) - -// Map is a map of resources that are supported, and provides helpers for -// more easily implementing a ResourceProvider. -type Map struct { - Mapping map[string]Resource -} - -func (m *Map) Validate( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := m.Mapping[t] - if !ok { - return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} - } - - // If there is no validator set, then it is valid - if r.ConfigValidator == nil { - return nil, nil - } - - return r.ConfigValidator.Validate(c) -} - -// Apply performs a create or update depending on the diff, and calls -// the proper function on the matching Resource. -func (m *Map) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource if it is created - err := r.Destroy(s, meta) - if err != nil { - return s, err - } - - s.ID = "" - } - - // If we're only destroying, and not creating, then return now. - // Otherwise, we continue so that we can create a new resource. - if !d.RequiresNew() { - return nil, nil - } - } - - var result *terraform.InstanceState - var err error - if s.ID == "" { - result, err = r.Create(s, d, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf( - "Resource type '%s' doesn't support update", - info.Type) - } - - result, err = r.Update(s, d, meta) - } - if result != nil { - if result.Attributes == nil { - result.Attributes = make(map[string]string) - } - - result.Attributes["id"] = result.ID - } - - return result, err -} - -// Diff performs a diff on the proper resource type. -func (m *Map) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, meta) -} - -// Refresh performs a Refresh on the proper resource type. -// -// Refresh on the Resource won't be called if the state represents a -// non-created resource (ID is blank). -// -// An error is returned if the resource isn't registered. 
-func (m *Map) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the resource isn't created, don't refresh. - if s.ID == "" { - return s, nil - } - - r, ok := m.Mapping[info.Type] - if !ok { - return nil, fmt.Errorf("Unknown resource type: %s", info.Type) - } - - return r.Refresh(s, meta) -} - -// Resources returns all the resources that are supported by this -// resource map and can be used to satisfy the Resources method of -// a ResourceProvider. -func (m *Map) Resources() []terraform.ResourceType { - ks := make([]string, 0, len(m.Mapping)) - for k, _ := range m.Mapping { - ks = append(ks, k) - } - sort.Strings(ks) - - rs := make([]terraform.ResourceType, 0, len(m.Mapping)) - for _, k := range ks { - rs = append(rs, terraform.ResourceType{ - Name: k, - }) - } - - return rs -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go deleted file mode 100644 index 0d9c831a..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go +++ /dev/null @@ -1,49 +0,0 @@ -package resource - -import ( - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/terraform" -) - -type Resource struct { - ConfigValidator *config.Validator - Create CreateFunc - Destroy DestroyFunc - Diff DiffFunc - Refresh RefreshFunc - Update UpdateFunc -} - -// CreateFunc is a function that creates a resource that didn't previously -// exist. -type CreateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) - -// DestroyFunc is a function that destroys a resource that previously -// exists using the state. -type DestroyFunc func( - *terraform.InstanceState, - interface{}) error - -// DiffFunc is a function that performs a diff of a resource. -type DiffFunc func( - *terraform.InstanceState, - *terraform.ResourceConfig, - interface{}) (*terraform.InstanceDiff, error) - -// RefreshFunc is a function that performs a refresh of a specific type -// of resource. -type RefreshFunc func( - *terraform.InstanceState, - interface{}) (*terraform.InstanceState, error) - -// UpdateFunc is a function that is called to update a resource that -// previously existed. The difference between this and CreateFunc is that -// the diff is guaranteed to only contain attributes that don't require -// a new resource. -type UpdateFunc func( - *terraform.InstanceState, - *terraform.InstanceDiff, - interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go deleted file mode 100644 index 88a83966..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ /dev/null @@ -1,259 +0,0 @@ -package resource - -import ( - "log" - "time" -) - -var refreshGracePeriod = 30 * time.Second - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an EC2 instance after refreshing -// it. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. 
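StateRefreshFunc and the StateChangeConf type defined just below are typically wired up like this; the role type and getRole callback are hypothetical stand-ins, and the import path assumes the provider's new terraform-plugin-sdk/v2 dependency, which ships the same helpers:

```go
package main

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// role is a hypothetical API object; only Status matters for the example.
type role struct {
	Status string
}

// waitForRoleActive polls the supplied getRole callback until the role
// reports ACTIVE, driven by the StateChangeConf machinery defined below.
func waitForRoleActive(getRole func() (*role, error)) (*role, error) {
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"PENDING", "CREATING"},
		Target:     []string{"ACTIVE"},
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
		Refresh: func() (interface{}, string, error) {
			r, err := getRole()
			if err != nil {
				return nil, "", err // a hard error aborts the wait immediately
			}
			if r == nil {
				return nil, "", nil // a nil result counts toward NotFoundChecks
			}
			return r, r.Status, nil
		},
	}

	raw, err := stateConf.WaitForState()
	if err != nil {
		return nil, err
	}
	return raw.(*role), nil
}
```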
-type StateRefreshFunc func() (result interface{}, state string, err error) - -// StateChangeConf is the configuration struct used for `WaitForState`. -type StateChangeConf struct { - Delay time.Duration // Wait this time before starting checks - Pending []string // States that are "allowed" and will continue trying - Refresh StateRefreshFunc // Refreshes the current state - Target []string // Target state - Timeout time.Duration // The amount of time to wait before timeout - MinTimeout time.Duration // Smallest time to wait before refreshes - PollInterval time.Duration // Override MinTimeout/backoff and only poll this often - NotFoundChecks int // Number of times to allow not found - - // This is to work around inconsistent APIs - ContinuousTargetOccurence int // Number of times the Target state has to occur continuously -} - -// WaitForState watches an object and waits for it to achieve the state -// specified in the configuration using the specified Refresh() func, -// waiting the number of seconds specified in the timeout configuration. -// -// If the Refresh function returns an error, exit immediately with that error. -// -// If the Refresh function returns a state other than the Target state or one -// listed in Pending, return immediately with an error. -// -// If the Timeout is exceeded before reaching the Target state, return an -// error. -// -// Otherwise, the result is the result of the first call to the Refresh function to -// reach the target state. -func (conf *StateChangeConf) WaitForState() (interface{}, error) { - log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) - - notfoundTick := 0 - targetOccurence := 0 - - // Set a default for times to check for not found - if conf.NotFoundChecks == 0 { - conf.NotFoundChecks = 20 - } - - if conf.ContinuousTargetOccurence == 0 { - conf.ContinuousTargetOccurence = 1 - } - - type Result struct { - Result interface{} - State string - Error error - Done bool - } - - // Read every result from the refresh loop, waiting for a positive result.Done. - resCh := make(chan Result, 1) - // cancellation channel for the refresh loop - cancelCh := make(chan struct{}) - - result := Result{} - - go func() { - defer close(resCh) - - time.Sleep(conf.Delay) - - // start with 0 delay for the first loop - var wait time.Duration - - for { - // store the last result - resCh <- result - - // wait and watch for cancellation - select { - case <-cancelCh: - return - case <-time.After(wait): - // first round had no wait - if wait == 0 { - wait = 100 * time.Millisecond - } - } - - res, currentState, err := conf.Refresh() - result = Result{ - Result: res, - State: currentState, - Error: err, - } - - if err != nil { - resCh <- result - return - } - - // If we're waiting for the absence of a thing, then return - if res == nil && len(conf.Target) == 0 { - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - - if res == nil { - // If we didn't find the resource, check if we have been - // not finding it for awhile, and if so, report an error. 
- notfoundTick++ - if notfoundTick > conf.NotFoundChecks { - result.Error = &NotFoundError{ - LastError: err, - Retries: notfoundTick, - } - resCh <- result - return - } - } else { - // Reset the counter for when a resource isn't found - notfoundTick = 0 - found := false - - for _, allowed := range conf.Target { - if currentState == allowed { - found = true - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - } - - for _, allowed := range conf.Pending { - if currentState == allowed { - found = true - targetOccurence = 0 - break - } - } - - if !found && len(conf.Pending) > 0 { - result.Error = &UnexpectedStateError{ - LastError: err, - State: result.State, - ExpectedState: conf.Target, - } - resCh <- result - return - } - } - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } - - // If a poll interval has been specified, choose that interval. - // Otherwise bound the default value. - if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { - wait = conf.PollInterval - } else { - if wait < conf.MinTimeout { - wait = conf.MinTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second - } - } - - log.Printf("[TRACE] Waiting %s before next try", wait) - } - }() - - // store the last value result from the refresh loop - lastResult := Result{} - - timeout := time.After(conf.Timeout) - for { - select { - case r, ok := <-resCh: - // channel closed, so return the last result - if !ok { - return lastResult.Result, lastResult.Error - } - - // we reached the intended state - if r.Done { - return r.Result, r.Error - } - - // still waiting, store the last result - lastResult = r - - case <-timeout: - log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) - log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) - - // cancel the goroutine and start our grace period timer - close(cancelCh) - timeout := time.After(refreshGracePeriod) - - // we need a for loop and a label to break on, because we may have - // an extra response value to read, but still want to wait for the - // channel to close. 
- forSelect: - for { - select { - case r, ok := <-resCh: - if r.Done { - // the last refresh loop reached the desired state - return r.Result, r.Error - } - - if !ok { - // the goroutine returned - break forSelect - } - - // target state not reached, save the result for the - // TimeoutError and wait for the channel to close - lastResult = r - case <-timeout: - log.Println("[ERROR] WaitForState exceeded refresh grace period") - break forSelect - } - } - - return nil, &TimeoutError{ - LastError: lastResult.Error, - LastState: lastResult.State, - Timeout: conf.Timeout, - ExpectedState: conf.Target, - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go deleted file mode 100644 index 257109d3..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go +++ /dev/null @@ -1,188 +0,0 @@ -package resource - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests -func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { - state := terraform.NewState() - - // in the odd case of a nil state, let the helper packages handle it - if newState == nil { - return nil, nil - } - - for _, newMod := range newState.Modules { - mod := state.AddModule(newMod.Addr) - - for name, out := range newMod.OutputValues { - outputType := "" - val := hcl2shim.ConfigValueFromHCL2(out.Value) - ty := out.Value.Type() - switch { - case ty == cty.String: - outputType = "string" - case ty.IsTupleType() || ty.IsListType(): - outputType = "list" - case ty.IsMapType(): - outputType = "map" - } - - mod.Outputs[name] = &terraform.OutputState{ - Type: outputType, - Value: val, - Sensitive: out.Sensitive, - } - } - - for _, res := range newMod.Resources { - resType := res.Addr.Type - providerType := res.ProviderConfig.ProviderConfig.Type - - resource := getResource(providers, providerType.LegacyString(), res.Addr) - - for key, i := range res.Instances { - resState := &terraform.ResourceState{ - Type: resType, - Provider: res.ProviderConfig.String(), - } - - // We should always have a Current instance here, but be safe about checking. 
- if i.Current != nil { - flatmap, err := shimmedAttributes(i.Current, resource) - if err != nil { - return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if i.Current.Private != nil { - err := json.Unmarshal(i.Current.Private, &meta) - if err != nil { - return nil, err - } - } - - resState.Primary = &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: i.Current.Status == states.ObjectTainted, - Meta: meta, - } - - if i.Current.SchemaVersion != 0 { - if resState.Primary.Meta == nil { - resState.Primary.Meta = map[string]interface{}{} - } - resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion - } - - for _, dep := range i.Current.DependsOn { - resState.Dependencies = append(resState.Dependencies, dep.String()) - } - - // convert the indexes to the old style flapmap indexes - idx := "" - switch key.(type) { - case addrs.IntKey: - // don't add numeric index values to resources with a count of 0 - if len(res.Instances) > 1 { - idx = fmt.Sprintf(".%d", key) - } - case addrs.StringKey: - idx = "." + key.String() - } - - mod.Resources[res.Addr.String()+idx] = resState - } - - // add any deposed instances - for _, dep := range i.Deposed { - flatmap, err := shimmedAttributes(dep, resource) - if err != nil { - return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if dep.Private != nil { - err := json.Unmarshal(dep.Private, &meta) - if err != nil { - return nil, err - } - } - - deposed := &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: dep.Status == states.ObjectTainted, - Meta: meta, - } - if dep.SchemaVersion != 0 { - deposed.Meta = map[string]interface{}{ - "schema_version": dep.SchemaVersion, - } - } - - resState.Deposed = append(resState.Deposed, deposed) - } - } - } - } - - return state, nil -} - -func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { - p := providers[providerName] - if p == nil { - panic(fmt.Sprintf("provider %q not found in test step", providerName)) - } - - // this is only for tests, so should only see schema.Providers - provider := p.(*schema.Provider) - - switch addr.Mode { - case addrs.ManagedResourceMode: - resource := provider.ResourcesMap[addr.Type] - if resource != nil { - return resource - } - case addrs.DataResourceMode: - resource := provider.DataSourcesMap[addr.Type] - if resource != nil { - return resource - } - } - - panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) -} - -func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { - flatmap := instance.AttrsFlat - if flatmap != nil { - return flatmap, nil - } - - // if we have json attrs, they need to be decoded - rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) - if err != nil { - return nil, err - } - - instanceState, err := res.ShimInstanceStateFromValue(rio.Value) - if err != nil { - return nil, err - } - - return instanceState.Attributes, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go deleted file mode 100644 index 1119144b..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ /dev/null @@ -1,1320 +0,0 @@ -package resource - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" 
- "reflect" - "regexp" - "strings" - "syscall" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/colorstring" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configload" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" -) - -// flagSweep is a flag available when running tests on the command line. It -// contains a comma seperated list of regions to for the sweeper functions to -// run in. This flag bypasses the normal Test path and instead runs functions designed to -// clean up any leaked resources a testing environment could have created. It is -// a best effort attempt, and relies on Provider authors to implement "Sweeper" -// methods for resources. - -// Adding Sweeper methods with AddTestSweepers will -// construct a list of sweeper funcs to be called here. We iterate through -// regions provided by the sweep flag, and for each region we iterate through the -// tests, and exit on any errors. At time of writing, sweepers are ran -// sequentially, however they can list dependencies to be ran first. We track -// the sweepers that have been ran, so as to not run a sweeper twice for a given -// region. -// -// WARNING: -// Sweepers are designed to be destructive. You should not use the -sweep flag -// in any environment that is not strictly a test environment. Resources will be -// destroyed. - -var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") -var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") -var sweeperFuncs map[string]*Sweeper - -// map of sweepers that have ran, and the success/fail status based on any error -// raised -var sweeperRunList map[string]bool - -// type SweeperFunc is a signature for a function that acts as a sweeper. It -// accepts a string for the region that the sweeper is to be ran in. This -// function must be able to construct a valid client for that region. -type SweeperFunc func(r string) error - -type Sweeper struct { - // Name for sweeper. Must be unique to be ran by the Sweeper Runner - Name string - - // Dependencies list the const names of other Sweeper functions that must be ran - // prior to running this Sweeper. This is an ordered list that will be invoked - // recursively at the helper/resource level - Dependencies []string - - // Sweeper function that when invoked sweeps the Provider of specific - // resources - F SweeperFunc -} - -func init() { - sweeperFuncs = make(map[string]*Sweeper) -} - -// AddTestSweepers function adds a given name and Sweeper configuration -// pair to the internal sweeperFuncs map. Invoke this function to register a -// resource sweeper to be available for running when the -sweep flag is used -// with `go test`. Sweeper names must be unique to help ensure a given sweeper -// is only ran once per run. 
-func AddTestSweepers(name string, s *Sweeper) { - if _, ok := sweeperFuncs[name]; ok { - log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) - } - - sweeperFuncs[name] = s -} - -func TestMain(m *testing.M) { - flag.Parse() - if *flagSweep != "" { - // parse flagSweep contents for regions to run - regions := strings.Split(*flagSweep, ",") - - // get filtered list of sweepers to run based on sweep-run flag - sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) - for _, region := range regions { - region = strings.TrimSpace(region) - // reset sweeperRunList for each region - sweeperRunList = map[string]bool{} - - log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) - for _, sweeper := range sweepers { - if err := runSweeperWithRegion(region, sweeper); err != nil { - log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err) - } - } - - log.Printf("Sweeper Tests ran:\n") - for s, _ := range sweeperRunList { - fmt.Printf("\t- %s\n", s) - } - } - } else { - os.Exit(m.Run()) - } -} - -// filterSweepers takes a comma seperated string listing the names of sweepers -// to be ran, and returns a filtered set from the list of all of sweepers to -// run based on the names given. -func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { - filterSlice := strings.Split(strings.ToLower(f), ",") - if len(filterSlice) == 1 && filterSlice[0] == "" { - // if the filter slice is a single element of "" then no sweeper list was - // given, so just return the full list - return source - } - - sweepers := make(map[string]*Sweeper) - for name, sweeper := range source { - for _, s := range filterSlice { - if strings.Contains(strings.ToLower(name), s) { - sweepers[name] = sweeper - } - } - } - return sweepers -} - -// runSweeperWithRegion recieves a sweeper and a region, and recursively calls -// itself with that region for every dependency found for that sweeper. If there -// are no dependencies, invoke the contained sweeper fun with the region, and -// add the success/fail status to the sweeperRunList. -func runSweeperWithRegion(region string, s *Sweeper) error { - for _, dep := range s.Dependencies { - if depSweeper, ok := sweeperFuncs[dep]; ok { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) - if err := runSweeperWithRegion(region, depSweeper); err != nil { - return err - } - } else { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) - } - } - - if _, ok := sweeperRunList[s.Name]; ok { - log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) - return nil - } - - runE := s.F(region) - if runE == nil { - sweeperRunList[s.Name] = true - } else { - sweeperRunList[s.Name] = false - } - - return runE -} - -const TestEnvVar = "TF_ACC" - -// TestProvider can be implemented by any ResourceProvider to provide custom -// reset functionality at the start of an acceptance test. -// The helper/schema Provider implements this interface. -type TestProvider interface { - TestReset() error -} - -// TestCheckFunc is the callback type used with acceptance tests to check -// the state of a resource. The state passed in is the latest state known, -// or in the case of being after a destroy, it is the last known state when -// it was created. 
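For the sweep flags to be parsed at all, the provider's test package delegates its own TestMain to the helper, as in this sketch:

```go
package main

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// TestMain hands control to the helper so the -sweep/-sweep-run flags are
// honored and sweepers run instead of the normal test binary when requested.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}
```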
-type TestCheckFunc func(*terraform.State) error - -// ImportStateCheckFunc is the check function for ImportState tests -type ImportStateCheckFunc func([]*terraform.InstanceState) error - -// ImportStateIdFunc is an ID generation function to help with complex ID -// generation for ImportState tests. -type ImportStateIdFunc func(*terraform.State) (string, error) - -// TestCase is a single acceptance test case used to test the apply/destroy -// lifecycle of a resource in a specific configuration. -// -// When the destroy plan is executed, the config from the last TestStep -// is used to plan it. -type TestCase struct { - // IsUnitTest allows a test to run regardless of the TF_ACC - // environment variable. This should be used with care - only for - // fast tests on local resources (e.g. remote state with a local - // backend) but can be used to increase confidence in correct - // operation of Terraform without waiting for a full acctest run. - IsUnitTest bool - - // PreCheck, if non-nil, will be called before any test steps are - // executed. It will only be executed in the case that the steps - // would run, so it can be used for some validation before running - // acceptance tests, such as verifying that keys are setup. - PreCheck func() - - // Providers is the ResourceProvider that will be under test. - // - // Alternately, ProviderFactories can be specified for the providers - // that are valid. This takes priority over Providers. - // - // The end effect of each is the same: specifying the providers that - // are used within the tests. - Providers map[string]terraform.ResourceProvider - ProviderFactories map[string]terraform.ResourceProviderFactory - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // CheckDestroy is called after the resource is finally destroyed - // to allow the tester to test that the resource is truly gone. - CheckDestroy TestCheckFunc - - // Steps are the apply sequences done within the context of the - // same state. Each step can have its own check to verify correctness. - Steps []TestStep - - // The settings below control the "ID-only refresh test." This is - // an enabled-by-default test that tests that a refresh can be - // refreshed with only an ID to result in the same attributes. - // This validates completeness of Refresh. - // - // IDRefreshName is the name of the resource to check. This will - // default to the first non-nil primary resource in the state. - // - // IDRefreshIgnore is a list of configuration keys that will be ignored. - IDRefreshName string - IDRefreshIgnore []string -} - -// TestStep is a single apply sequence of a test, done within the -// context of a state. -// -// Multiple TestSteps can be sequenced in a Test to allow testing -// potentially complex update logic. In general, simply create/destroy -// tests will only need one step. -type TestStep struct { - // ResourceName should be set to the name of the resource - // that is being tested. Example: "aws_instance.foo". Various test - // modes use this to auto-detect state information. - // - // This is only required if the test mode settings below say it is - // for the mode you're using. - ResourceName string - - // PreConfig is called before the Config is applied to perform any per-step - // setup that needs to happen. This is called regardless of "test mode" - // below. 
- PreConfig func() - - // Taint is a list of resource addresses to taint prior to the execution of - // the step. Be sure to only include this at a step where the referenced - // address will be present in state, as it will fail the test if the resource - // is missing. - // - // This option is ignored on ImportState tests, and currently only works for - // resources in the root module path. - Taint []string - - //--------------------------------------------------------------- - // Test modes. One of the following groups of settings must be - // set to determine what the test step will do. Ideally we would've - // used Go interfaces here but there are now hundreds of tests we don't - // want to re-type so instead we just determine which step logic - // to run based on what settings below are set. - //--------------------------------------------------------------- - - //--------------------------------------------------------------- - // Plan, Apply testing - //--------------------------------------------------------------- - - // Config a string of the configuration to give to Terraform. If this - // is set, then the TestCase will execute this step with the same logic - // as a `terraform apply`. - Config string - - // Check is called after the Config is applied. Use this step to - // make your own API calls to check the status of things, and to - // inspect the format of the ResourceState itself. - // - // If an error is returned, the test will fail. In this case, a - // destroy plan will still be attempted. - // - // If this is nil, no check is done on this step. - Check TestCheckFunc - - // Destroy will create a destroy plan if set to true. - Destroy bool - - // ExpectNonEmptyPlan can be set to true for specific types of tests that are - // looking to verify that a diff occurs - ExpectNonEmptyPlan bool - - // ExpectError allows the construction of test cases that we expect to fail - // with an error. The specified regexp must match against the error for the - // test to pass. - ExpectError *regexp.Regexp - - // PlanOnly can be set to only run `plan` with this configuration, and not - // actually apply it. This is useful for ensuring config changes result in - // no-op plans - PlanOnly bool - - // PreventDiskCleanup can be set to true for testing terraform modules which - // require access to disk at runtime. Note that this will leave files in the - // temp folder - PreventDiskCleanup bool - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // SkipFunc is called before applying config, but after PreConfig - // This is useful for defining test steps with platform-dependent checks - SkipFunc func() (bool, error) - - //--------------------------------------------------------------- - // ImportState testing - //--------------------------------------------------------------- - - // ImportState, if true, will test the functionality of ImportState - // by importing the resource with ResourceName (must be set) and the - // ID of that resource. - ImportState bool - - // ImportStateId is the ID to perform an ImportState operation with. - // This is optional. If it isn't set, then the resource ID is automatically - // determined by inspecting the state for ResourceName's ID. - ImportStateId string - - // ImportStateIdPrefix is the prefix added in front of ImportStateId. - // This can be useful in complex import cases, where more than one - // attribute needs to be passed on as the Import ID. 
Mainly in cases - // where the ID is not known, and a known prefix needs to be added to - // the unset ImportStateId field. - ImportStateIdPrefix string - - // ImportStateIdFunc is a function that can be used to dynamically generate - // the ID for the ImportState tests. It is sent the state, which can be - // checked to derive the attributes necessary and generate the string in the - // desired format. - ImportStateIdFunc ImportStateIdFunc - - // ImportStateCheck checks the results of ImportState. It should be - // used to verify that the resulting value of ImportState has the - // proper resources, IDs, and attributes. - ImportStateCheck ImportStateCheckFunc - - // ImportStateVerify, if true, will also check that the state values - // that are finally put into the state after import match for all the - // IDs returned by the Import. Note that this checks for strict equality - // and does not respect DiffSuppressFunc or CustomizeDiff. - // - // ImportStateVerifyIgnore is a list of prefixes of fields that should - // not be verified to be equal. These can be set to ephemeral fields or - // fields that can't be refreshed and don't matter. - ImportStateVerify bool - ImportStateVerifyIgnore []string - - // provider s is used internally to maintain a reference to the - // underlying providers during the tests - providers map[string]terraform.ResourceProvider -} - -// Set to a file mask in sprintf format where %s is test name -const EnvLogPathMask = "TF_LOG_PATH_MASK" - -func LogOutput(t TestT) (logOutput io.Writer, err error) { - logOutput = ioutil.Discard - - logLevel := logging.CurrentLogLevel() - if logLevel == "" { - return - } - - logOutput = os.Stderr - - if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { - // Escape special characters which may appear if we have subtests - testName := strings.Replace(t.Name(), "/", "__", -1) - - logPath := fmt.Sprintf(logPathMask, testName) - var err error - logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) - if err != nil { - return nil, err - } - } - - // This was the default since the beginning - logOutput = &logging.LevelFilter{ - Levels: logging.ValidLevels, - MinLevel: logging.LogLevel(logLevel), - Writer: logOutput, - } - - return -} - -// ParallelTest performs an acceptance test on a resource, allowing concurrency -// with other ParallelTest. -// -// Tests will fail if they do not properly handle conditions to allow multiple -// tests to occur against the same resource or service (e.g. random naming). -// All other requirements of the Test function also apply to this function. -func ParallelTest(t TestT, c TestCase) { - t.Parallel() - Test(t, c) -} - -// Test performs an acceptance test on a resource. -// -// Tests are not run unless an environmental variable "TF_ACC" is -// set to some non-empty value. This is to avoid test cases surprising -// a user by creating real resources. -// -// Tests will fail unless the verbose flag (`go test -v`, or explicitly -// the "-test.v" flag) is set. Because some acceptance tests take quite -// long, we require the verbose flag so users are able to see progress -// output. -func Test(t TestT, c TestCase) { - // We only run acceptance tests if an env var is set because they're - // slow and generally require some outside configuration. 
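Taken together, TestCase and TestStep drive acceptance tests along these lines; testAccPreCheck, testAccProviders and testAccCheckRoleDestroy are assumed helpers from the provider's own test setup, and the configuration mirrors examples/alks.tf:

```go
package main

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// A hypothetical acceptance test for this provider's alks_iamrole resource,
// showing how TestCase and TestStep fit together.
func TestAccAlksIamRole_basic(t *testing.T) {
	const config = `
resource "alks_iamrole" "test" {
  name                     = "TEST-DELETE-ACC"
  type                     = "AWS CodeBuild"
  include_default_policies = false
}
`

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckRoleDestroy,
		Steps: []resource.TestStep{
			{
				Config: config,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("alks_iamrole.test", "name", "TEST-DELETE-ACC"),
					resource.TestCheckResourceAttr("alks_iamrole.test", "type", "AWS CodeBuild"),
				),
			},
		},
	})
}
```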
You can opt out - // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) - return - } - - logWriter, err := LogOutput(t) - if err != nil { - t.Error(fmt.Errorf("error setting up logging: %s", err)) - } - log.SetOutput(logWriter) - - // We require verbose mode so that the user knows what is going on. - if !testTesting && !testing.Verbose() && !c.IsUnitTest { - t.Fatal("Acceptance tests must be run with the -v flag on tests") - return - } - - // Run the PreCheck if we have it - if c.PreCheck != nil { - c.PreCheck() - } - - // get instances of all providers, so we can use the individual - // resources to shim the state during the tests. - providers := make(map[string]terraform.ResourceProvider) - for name, pf := range testProviderFactories(c) { - p, err := pf() - if err != nil { - t.Fatal(err) - } - providers[name] = p - } - - providerResolver, err := testProviderResolver(c) - if err != nil { - t.Fatal(err) - } - - opts := terraform.ContextOpts{ProviderResolver: providerResolver} - - // A single state variable to track the lifecycle, starting with no state - var state *terraform.State - - // Go through each step and run it - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - errored := false - for i, step := range c.Steps { - // insert the providers into the step so we can get the resources for - // shimming the state - step.providers = providers - - var err error - log.Printf("[DEBUG] Test: Executing step %d", i) - - if step.SkipFunc != nil { - skip, err := step.SkipFunc() - if err != nil { - t.Fatal(err) - } - if skip { - log.Printf("[WARN] Skipping step %d", i) - continue - } - } - - if step.Config == "" && !step.ImportState { - err = fmt.Errorf( - "unknown test mode for step. Please see TestStep docs\n\n%#v", - step) - } else { - if step.ImportState { - if step.Config == "" { - step.Config = testProviderConfig(c) - } - - // Can optionally set step.Config in addition to - // step.ImportState, to provide config for the import. - state, err = testStepImportState(opts, state, step) - } else { - state, err = testStepConfig(opts, state, step) - } - } - - // If we expected an error, but did not get one, fail - if err == nil && step.ExpectError != nil { - errored = true - t.Error(fmt.Sprintf( - "Step %d, no error received, but expected a match to:\n\n%s\n\n", - i, step.ExpectError)) - break - } - - // If there was an error, exit - if err != nil { - // Perhaps we expected an error? Check if it matches - if step.ExpectError != nil { - if !step.ExpectError.MatchString(err.Error()) { - errored = true - t.Error(fmt.Sprintf( - "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", - i, err, step.ExpectError)) - break - } - } else { - errored = true - t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) - break - } - } - - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. 
We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. If refresh isn't read-only, then this will have - // caught a different bug. - if idRefreshCheck != nil { - log.Printf( - "[WARN] Test: Running ID-only refresh check on %s", - idRefreshCheck.Primary.ID) - if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { - log.Printf("[ERROR] Test: ID-only test failed: %s", err) - t.Error(fmt.Sprintf( - "[ERROR] Test: ID-only test failed: %s", err)) - break - } - } - } - } - - // If we never checked an id-only refresh, it is a failure. - if idRefresh { - if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { - t.Error("ID-only refresh check never ran.") - } - } - - // If we have a state, then run the destroy - if state != nil { - lastStep := c.Steps[len(c.Steps)-1] - destroyStep := TestStep{ - Config: lastStep.Config, - Check: c.CheckDestroy, - Destroy: true, - PreventDiskCleanup: lastStep.PreventDiskCleanup, - PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, - providers: providers, - } - - log.Printf("[WARN] Test: Executing destroy step") - state, err := testStep(opts, state, destroyStep) - if err != nil { - t.Error(fmt.Sprintf( - "Error destroying resource! WARNING: Dangling resources\n"+ - "may exist. The full state and error is shown below.\n\n"+ - "Error: %s\n\nState: %s", - err, - state)) - } - } else { - log.Printf("[WARN] Skipping destroy test since there is no state.") - } -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. -func testProviderConfig(c TestCase) string { - var lines []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - - return strings.Join(lines, "") -} - -// testProviderFactories combines the fixed Providers and -// ResourceProviderFactory functions into a single map of -// ResourceProviderFactory functions. -func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { - ctxProviders := make(map[string]terraform.ResourceProviderFactory) - for k, pf := range c.ProviderFactories { - ctxProviders[k] = pf - } - - // add any fixed providers - for k, p := range c.Providers { - ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) - } - return ctxProviders -} - -// testProviderResolver is a helper to build a ResourceProviderResolver -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. -func testProviderResolver(c TestCase) (providers.Resolver, error) { - ctxProviders := testProviderFactories(c) - - // wrap the old provider factories in the test grpc server so they can be - // called from terraform. - newProviders := make(map[addrs.Provider]providers.Factory) - - for k, pf := range ctxProviders { - factory := pf // must copy to ensure each closure sees its own value - newProviders[addrs.NewLegacyProvider(k)] = func() (providers.Interface, error) { - p, err := factory() - if err != nil { - return nil, err - } - - // The provider is wrapped in a GRPCTestProvider so that it can be - // passed back to terraform core as a providers.Interface, rather - // than the legacy ResourceProvider. 
- return GRPCTestProvider(p), nil - } - } - - return providers.ResolverFixed(newProviders), nil -} - -// UnitTest is a helper to force the acceptance testing harness to run in the -// normal unit test suite. This should only be used for resource that don't -// have any external dependencies. -func UnitTest(t TestT, c TestCase) { - c.IsUnitTest = true - Test(t, c) -} - -func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { - // TODO: We guard by this right now so master doesn't explode. We - // need to remove this eventually to make this part of the normal tests. - if os.Getenv("TF_ACC_IDONLY") == "" { - return nil - } - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: r.Type, - Name: "foo", - }.Instance(addrs.NoKey) - absAddr := addr.Absolute(addrs.RootModuleInstance) - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := states.NewState() - state.RootModule().SetResourceInstanceCurrent( - addr, - &states.ResourceInstanceObjectSrc{ - AttrsFlat: r.Primary.Attributes, - Status: states.ObjectReady, - }, - addrs.ProviderConfig{Type: addrs.NewLegacyProvider("placeholder")}.Absolute(addrs.RootModuleInstance), - ) - - // Create the config module. We use the full config because Refresh - // doesn't have access to it and we may need things like provider - // configurations. The initial implementation of id-only checks used - // an empty config module, but that caused the aforementioned problems. - cfg, err := testConfig(opts, step) - if err != nil { - return err - } - - // Initialize the context - opts.Config = cfg - opts.State = state - ctx, ctxDiags := terraform.NewContext(&opts) - if ctxDiags.HasErrors() { - return ctxDiags.Err() - } - if diags := ctx.Validate(); len(diags) > 0 { - if diags.HasErrors() { - return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) - } - - // Refresh! - state, refreshDiags := ctx.Refresh() - if refreshDiags.HasErrors() { - return refreshDiags.Err() - } - - // Verify attribute equivalence. - actualR := state.ResourceInstance(absAddr) - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Current == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Current.AttrsFlat - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k, _ := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k, _ := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. 
Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} - -func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { - if step.PreConfig != nil { - step.PreConfig() - } - - cfgPath, err := ioutil.TempDir("", "tf-test") - if err != nil { - return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) - } - - if step.PreventDiskCleanup { - log.Printf("[INFO] Skipping defer os.RemoveAll call") - } else { - defer os.RemoveAll(cfgPath) - } - - // Write the main configuration file - err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating temporary file for config: %s", err) - } - - // Create directory for our child modules, if any. - modulesDir := filepath.Join(cfgPath, ".modules") - err = os.Mkdir(modulesDir, os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating child modules directory: %s", err) - } - - inst := initwd.NewModuleInstaller(modulesDir, nil) - _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) - if installDiags.HasErrors() { - return nil, installDiags.Err() - } - - loader, err := configload.NewLoader(&configload.Config{ - ModulesDir: modulesDir, - }) - if err != nil { - return nil, fmt.Errorf("failed to create config loader: %s", err) - } - - config, configDiags := loader.LoadConfig(cfgPath) - if configDiags.HasErrors() { - return nil, configDiags - } - - return config, nil -} - -func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.ResourceName]; ok { - return v, nil - } - } - } - - return nil, fmt.Errorf( - "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) -} - -// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - for i, f := range fs { - if err := f(s); err != nil { - return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) - } - } - - return nil - } -} - -// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -// -// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the -// TestCheckFuncs and aggregates failures. -func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - var result *multierror.Error - - for i, f := range fs { - if err := f(s); err != nil { - result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) - } - } - - return result.ErrorOrNil() - } -} - -// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value -// exists in state for the given name/key combination. It is useful when -// testing that computed values were set, when it is not possible to -// know ahead of time what the values will be. 
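Most assertions use the built-in check helpers that follow, but the TestCheckFunc signature also makes hand-written checks straightforward; a sketch with a hypothetical resource address:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)

// testAccCheckRoleNamePrefix is a hand-written TestCheckFunc: it walks the
// root module state itself instead of relying on the attribute helpers.
func testAccCheckRoleNamePrefix(address, prefix string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[address]
		if !ok {
			return fmt.Errorf("not found in state: %s", address)
		}
		if rs.Primary == nil {
			return fmt.Errorf("%s has no primary instance", address)
		}
		if name := rs.Primary.Attributes["name"]; !strings.HasPrefix(name, prefix) {
			return fmt.Errorf("%s: name %q does not start with %q", address, name, prefix)
		}
		return nil
	}
}
```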
-func TestCheckResourceAttrSet(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - } -} - -// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with -// support for non-root modules -func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - } -} - -func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { - if val, ok := is.Attributes[key]; !ok || val == "" { - return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) - } - - return nil -} - -// TestCheckResourceAttr is a TestCheckFunc which validates -// the value in state for the given name/key combination. -func TestCheckResourceAttr(name, key, value string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - } -} - -// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with -// support for non-root modules -func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - } -} - -func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { - // Empty containers may be elided from the state. - // If the intent here is to check for an empty container, allow the key to - // also be non-existent. - emptyCheck := false - if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - emptyCheck = true - } - - if v, ok := is.Attributes[key]; !ok || v != value { - if emptyCheck && !ok { - return nil - } - - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } - - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) - } - return nil -} - -// TestCheckNoResourceAttr is a TestCheckFunc which ensures that -// NO value exists in state for the given name/key combination. -func TestCheckNoResourceAttr(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - } -} - -// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with -// support for non-root modules -func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - } -} - -func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { - // Empty containers may sometimes be included in the state. - // If the intent here is to check for an empty container, allow the value to - // also be "0". 
- emptyCheck := false - if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { - emptyCheck = true - } - - val, exists := is.Attributes[key] - if emptyCheck && val == "0" { - return nil - } - - if exists { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) - } - - return nil -} - -// TestMatchResourceAttr is a TestCheckFunc which checks that the value -// in state for the given name/key combination matches the given regex. -func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - } -} - -// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with -// support for non-root modules -func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - } -} - -func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) - } - - return nil -} - -// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the -// value is a pointer so that it can be updated while the test is running. -// It will only be dereferenced at the point this step is run. -func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckResourceAttr(name, key, *value)(s) - } -} - -// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with -// support for non-root modules -func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckModuleResourceAttr(mp, name, key, *value)(s) - } -} - -// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values -// in state for a pair of name/key combinations are equal. 
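Cross-resource and output assertions compose the same way; every address, key and expected value here is illustrative:

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// pairAndOutputChecks bundles the pair/output helpers into a single check
// for use in a TestStep. Unlike ComposeTestCheckFunc, the aggregate variant
// runs every check and reports all failures at once.
func pairAndOutputChecks() resource.TestCheckFunc {
	return resource.ComposeAggregateTestCheckFunc(
		// both resources must agree on the role name they reference
		resource.TestCheckResourceAttrPair(
			"alks_iamrole.test", "name",
			"aws_iam_role_policy_attachment.attach", "role",
		),
		// and the configuration's output must surface the expected value
		resource.TestCheckOutput("role_name", "TEST-DELETE-ACC"),
	)
}
```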
-func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { - return func(s *terraform.State) error { - isFirst, err := primaryInstanceState(s, nameFirst) - if err != nil { - return err - } - - isSecond, err := primaryInstanceState(s, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } -} - -// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with -// support for non-root modules -func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { - mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() - mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() - return func(s *terraform.State) error { - isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) - if err != nil { - return err - } - - isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } -} - -func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { - vFirst, okFirst := isFirst.Attributes[keyFirst] - vSecond, okSecond := isSecond.Attributes[keySecond] - - // Container count values of 0 should not be relied upon, and not reliably - // maintained by helper/schema. For the purpose of tests, consider unset and - // 0 to be equal. - if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && - (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { - // they have the same suffix, and it is a collection count key. - if vFirst == "0" || vFirst == "" { - okFirst = false - } - if vSecond == "0" || vSecond == "" { - okSecond = false - } - } - - if okFirst != okSecond { - if !okFirst { - return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) - } - return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) - } - if !(okFirst || okSecond) { - // If they both don't exist then they are equally unset, so that's okay. - return nil - } - - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) - } - - return nil -} - -// TestCheckOutput checks an output in the Terraform configuration -func TestCheckOutput(name, value string) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Value != value { - return fmt.Errorf( - "Output '%s': expected %#v, got %#v", - name, - value, - rs) - } - - return nil - } -} - -func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if !r.MatchString(rs.Value.(string)) { - return fmt.Errorf( - "Output '%s': %#v didn't match %q", - name, - rs, - r.String()) - } - - return nil - } -} - -// TestT is the interface used to handle the test lifecycle of a test. 
-// -// Users should just use a *testing.T object, which implements this. -type TestT interface { - Error(args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) - Name() string - Parallel() -} - -// This is set to true by unit tests to alter some behavior -var testTesting = false - -// modulePrimaryInstanceState returns the instance state for the given resource -// name in a ModuleState -func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { - rs, ok := ms.Resources[name] - if !ok { - return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) - } - - is := rs.Primary - if is == nil { - return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) - } - - return is, nil -} - -// modulePathPrimaryInstanceState returns the primary instance state for the -// given resource name in a given module path. -func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { - ms := s.ModuleByPath(mp) - if ms == nil { - return nil, fmt.Errorf("No module found at: %s", mp) - } - - return modulePrimaryInstanceState(s, ms, name) -} - -// primaryInstanceState returns the primary instance state for the given -// resource name in the root module. -func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() - return modulePrimaryInstanceState(s, ms, name) -} - -// operationError is a specialized implementation of error used to describe -// failures during one of the several operations performed for a particular -// test case. -type operationError struct { - OpName string - Diags tfdiags.Diagnostics -} - -func newOperationError(opName string, diags tfdiags.Diagnostics) error { - return operationError{opName, diags} -} - -// Error returns a terse error string containing just the basic diagnostic -// messages, for situations where normal Go error behavior is appropriate. -func (err operationError) Error() string { - return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) -} - -// ErrorDetail is like Error except it includes verbosely-rendered diagnostics -// similar to what would come from a normal Terraform run, which include -// additional context not included in Error(). -func (err operationError) ErrorDetail() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "errors during %s:", err.OpName) - clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} - for _, diag := range err.Diags { - diagStr := format.Diagnostic(diag, nil, clr, 78) - buf.WriteByte('\n') - buf.WriteString(diagStr) - } - return buf.String() -} - -// detailedErrorMessage is a helper for calling ErrorDetail on an error if -// it is an operationError or just taking Error otherwise. 
-func detailedErrorMessage(err error) string { - switch tErr := err.(type) { - case operationError: - return tErr.ErrorDetail() - default: - return err.Error() - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go deleted file mode 100644 index e3da9bc0..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ /dev/null @@ -1,404 +0,0 @@ -package resource - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "log" - "sort" - "strings" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" -) - -// testStepConfig runs a config-mode test step -func testStepConfig( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - return testStep(opts, state, step) -} - -func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { - if !step.Destroy { - if err := testStepTaint(state, step); err != nil { - return state, err - } - } - - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - var stepDiags tfdiags.Diagnostics - - // Build the context - opts.Config = cfg - opts.State, err = terraform.ShimLegacyState(state) - if err != nil { - return nil, err - } - - opts.Destroy = step.Destroy - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) - } - if stepDiags := ctx.Validate(); len(stepDiags) > 0 { - if stepDiags.HasErrors() { - return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", stepDiags) - } - - // Refresh! - newState, stepDiags := ctx.Refresh() - // shim the state first so the test can check the state on errors - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("refresh", stepDiags) - } - - // If this step is a PlanOnly step, skip over this first Plan and subsequent - // Apply, and use the follow up Plan that checks for perpetual diffs - if !step.PlanOnly { - // Plan! - if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("plan", stepDiags) - } else { - log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) - } - - // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behavior in the check - // function - stateBeforeApplication := state.DeepCopy() - - // Apply the diff, creating real resources. 
- newState, stepDiags = ctx.Apply() - // shim the state first so the test can check the state on errors - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("apply", stepDiags) - } - - // Run any configured checks - if step.Check != nil { - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Now, verify that Plan is now empty and we don't have a perpetual diff issue - // We do this with TWO plans. One without a refresh. - var p *plans.Plan - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("follow-up plan", stepDiags) - } - if !p.Changes.Empty() { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // And another after a Refresh. - if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - newState, stepDiags = ctx.Refresh() - if stepDiags.HasErrors() { - return state, newOperationError("follow-up refresh", stepDiags) - } - - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - } - if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { - return state, newOperationError("second follow-up refresh", stepDiags) - } - empty := p.Changes.Empty() - - // Data resources are tricky because they legitimately get instantiated - // during refresh so that they will be already populated during the - // plan walk. Because of this, if we have any data resources in the - // config we'll end up wanting to destroy them again here. This is - // acceptable and expected, and we'll treat it as "empty" for the - // sake of this testing. - if step.Destroy && !empty { - empty = true - for _, change := range p.Changes.Resources { - if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { - empty = false - break - } - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && empty { - return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // Made it here? Good job test step! - return state, nil -} - -// legacyPlanComparisonString produces a string representation of the changes -// from a plan and a given state together, as was formerly produced by the -// String method of terraform.Plan. -// -// This is here only for compatibility with existing tests that predate our -// new plan and state types, and should not be used in new tests. Instead, use -// a library like "cmp" to do a deep equality and diff on the two -// data structures.
-func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { - return fmt.Sprintf( - "DIFF:\n\n%s\n\nSTATE:\n\n%s", - legacyDiffComparisonString(changes), - state.String(), - ) -} - -// legacyDiffComparisonString produces a string representation of the changes -// from a planned changes object, as was formerly produced by the String method -// of terraform.Diff. -// -// This is here only for compatibility with existing tests that predate our -// new plan types, and should not be used in new tests. Instead, use a library -// like "cmp" to do a deep equality check and diff on the two data structures. -func legacyDiffComparisonString(changes *plans.Changes) string { - // The old string representation of a plan was grouped by module, but - // our new plan structure is not grouped in that way and so we'll need - // to preprocess it in order to produce that grouping. - type ResourceChanges struct { - Current *plans.ResourceInstanceChangeSrc - Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc - } - byModule := map[string]map[string]*ResourceChanges{} - resourceKeys := map[string][]string{} - requiresReplace := map[string][]string{} - var moduleKeys []string - for _, rc := range changes.Resources { - if rc.Action == plans.NoOp { - // We won't mention no-op changes here at all, since the old plan - // model we are emulating here didn't have such a concept. - continue - } - moduleKey := rc.Addr.Module.String() - if _, exists := byModule[moduleKey]; !exists { - moduleKeys = append(moduleKeys, moduleKey) - byModule[moduleKey] = make(map[string]*ResourceChanges) - } - resourceKey := rc.Addr.Resource.String() - if _, exists := byModule[moduleKey][resourceKey]; !exists { - resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) - byModule[moduleKey][resourceKey] = &ResourceChanges{ - Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), - } - } - - if rc.DeposedKey == states.NotDeposed { - byModule[moduleKey][resourceKey].Current = rc - } else { - byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc - } - - rr := []string{} - for _, p := range rc.RequiredReplace.List() { - rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) - } - requiresReplace[resourceKey] = rr - } - sort.Strings(moduleKeys) - for _, ks := range resourceKeys { - sort.Strings(ks) - } - - var buf bytes.Buffer - - for _, moduleKey := range moduleKeys { - rcs := byModule[moduleKey] - var mBuf bytes.Buffer - - for _, resourceKey := range resourceKeys[moduleKey] { - rc := rcs[resourceKey] - - forceNewAttrs := requiresReplace[resourceKey] - - crud := "UPDATE" - if rc.Current != nil { - switch rc.Current.Action { - case plans.DeleteThenCreate: - crud = "DESTROY/CREATE" - case plans.CreateThenDelete: - crud = "CREATE/DESTROY" - case plans.Delete: - crud = "DESTROY" - case plans.Create: - crud = "CREATE" - } - } else { - // We must be working on a deposed object then, in which - // case destroying is the only possible action. 
- crud = "DESTROY" - } - - extra := "" - if rc.Current == nil && len(rc.Deposed) > 0 { - extra = " (deposed only)" - } - - fmt.Fprintf( - &mBuf, "%s: %s%s\n", - crud, resourceKey, extra, - ) - - attrNames := map[string]bool{} - var oldAttrs map[string]string - var newAttrs map[string]string - if rc.Current != nil { - if before := rc.Current.Before; before != nil { - ty, err := before.ImpliedType() - if err == nil { - val, err := before.Decode(ty) - if err == nil { - oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range oldAttrs { - attrNames[k] = true - } - } - } - } - if after := rc.Current.After; after != nil { - ty, err := after.ImpliedType() - if err == nil { - val, err := after.Decode(ty) - if err == nil { - newAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range newAttrs { - attrNames[k] = true - } - } - } - } - } - if oldAttrs == nil { - oldAttrs = make(map[string]string) - } - if newAttrs == nil { - newAttrs = make(map[string]string) - } - - attrNamesOrder := make([]string, 0, len(attrNames)) - keyLen := 0 - for n := range attrNames { - attrNamesOrder = append(attrNamesOrder, n) - if len(n) > keyLen { - keyLen = len(n) - } - } - sort.Strings(attrNamesOrder) - - for _, attrK := range attrNamesOrder { - v := newAttrs[attrK] - u := oldAttrs[attrK] - - if v == hcl2shim.UnknownVariableValue { - v = "" - } - // NOTE: we don't support here because we would - // need schema to do that. Excluding sensitive values - // is now done at the UI layer, and so should not be tested - // at the core layer. - - updateMsg := "" - - // This may not be as precise as in the old diff, as it matches - // everything under the attribute that was originally marked as - // ForceNew, but should help make it easier to determine what - // caused replacement here. 
- for _, k := range forceNewAttrs { - if strings.HasPrefix(attrK, k) { - updateMsg = " (forces new resource)" - break - } - } - - fmt.Fprintf( - &mBuf, " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, v, - updateMsg, - ) - } - } - - if moduleKey == "" { // root module - buf.Write(mBuf.Bytes()) - buf.WriteByte('\n') - continue - } - - fmt.Fprintf(&buf, "%s:\n", moduleKey) - s := bufio.NewScanner(&mBuf) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return buf.String() -} - -func testStepTaint(state *terraform.State, step TestStep) error { - for _, p := range step.Taint { - m := state.RootModule() - if m == nil { - return errors.New("no state") - } - rs, ok := m.Resources[p] - if !ok { - return fmt.Errorf("resource %q not found in state", p) - } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) - rs.Taint() - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go deleted file mode 100644 index 9a3ef1be..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go +++ /dev/null @@ -1,232 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// testStepImportState runs an import state test step -func testStepImportState( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - - // Determine the ID to import - var importId string - switch { - case step.ImportStateIdFunc != nil: - var err error - importId, err = step.ImportStateIdFunc(state) - if err != nil { - return state, err - } - case step.ImportStateId != "": - importId = step.ImportStateId - default: - resource, err := testResource(step, state) - if err != nil { - return state, err - } - importId = resource.Primary.ID - } - - importPrefix := step.ImportStateIdPrefix - if importPrefix != "" { - importId = fmt.Sprintf("%s%s", importPrefix, importId) - } - - // Setup the context. We initialize with an empty state. We use the - // full config for provider configurations. - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - opts.Config = cfg - - // import tests start with empty state - opts.State = states.NewState() - - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, stepDiags.Err() - } - - // The test step provides the resource address as a string, so we need - // to parse it to get an addrs.AbsResourceAddress to pass in to the - // import method. 
- traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) - if hclDiags.HasErrors() { - return nil, hclDiags - } - importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) - if stepDiags.HasErrors() { - return nil, stepDiags.Err() - } - - // Do the import - importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ - // Set the module so that any provider config is loaded - Config: cfg, - - Targets: []*terraform.ImportTarget{ - &terraform.ImportTarget{ - Addr: importAddr, - ID: importId, - }, - }, - }) - if stepDiags.HasErrors() { - log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) - return state, stepDiags.Err() - } - - newState, err := shimNewState(importedState, step.providers) - if err != nil { - return nil, err - } - // Go through the new state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range newState.RootModule().Resources { - if r.Primary != nil { - is := r.Primary.DeepCopy() - is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type - states = append(states, is) - } - } - if err := step.ImportStateCheck(states); err != nil { - return state, err - } - } - - // Verify that all the states match - if step.ImportStateVerify { - new := newState.RootModule().Resources - old := state.RootModule().Resources - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for _, r2 := range old { - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { - oldR = r2 - break - } - } - if oldR == nil { - return state, fmt.Errorf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - var rsrcSchema *schema.Resource - if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { - providerType := providerAddr.ProviderConfig.Type.LegacyString() - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - } - - // don't add empty flatmapped containers, so we can more easily - // compare the attributes - skipEmpty := func(k, v string) bool { - if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { - if v == "0" { - return true - } - } - return false - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - actual[k] = v - } - - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - // Also remove any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. 
- if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return state, fmt.Errorf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - // Return the old state (non-imported) so we don't change anything. - return state, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go deleted file mode 100644 index e56a5155..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go +++ /dev/null @@ -1,84 +0,0 @@ -package resource - -import ( - "sync" - "time" -) - -// Retry is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -func Retry(timeout time.Duration, f RetryFunc) error { - // These are used to pull the error out of the function; need a mutex to - // avoid a data race. - var resultErr error - var resultErrMu sync.Mutex - - c := &StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * time.Millisecond, - Refresh: func() (interface{}, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil - } - return nil, "quit", rerr.Err - }, - } - - _, waitErr := c.WaitForState() - - // Need to acquire the lock here to be able to avoid race using resultErr as - // the return value - resultErrMu.Lock() - defer resultErrMu.Unlock() - - // resultErr may be nil because the wait timed out and resultErr was never - // set; this is still an error - if resultErr == nil { - return waitErr - } - // resultErr takes precedence over waitErr if both are set because it is - // more likely to be useful - return resultErr -} - -// RetryFunc is the function retried until it succeeds. -type RetryFunc func() *RetryError - -// RetryError is the required return type of RetryFunc. It forces client code -// to choose whether or not a given error is retryable. -type RetryError struct { - Err error - Retryable bool -} - -// RetryableError is a helper to create a RetryError that's retryable from a -// given error. -func RetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: true} -} - -// NonRetryableError is a helper to create a RetryError that's _not_ retryable -// from a given error. 
-func NonRetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: false} -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go deleted file mode 100644 index 42c2bed9..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go +++ /dev/null @@ -1,200 +0,0 @@ -package schema - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" - ctyconvert "github.com/zclconf/go-cty/cty/convert" -) - -// Backend represents a partial backend.Backend implementation and simplifies -// the creation of configuration loading and validation. -// -// Unlike other schema structs such as Provider, this struct is meant to be -// embedded within your actual implementation. It provides implementations -// only for Input and Configure and gives you a method for accessing the -// configuration in the form of a ResourceData that you're expected to call -// from the other implementation funcs. -type Backend struct { - // Schema is the schema for the configuration of this backend. If this - // Backend has no configuration this can be omitted. - Schema map[string]*Schema - - // ConfigureFunc is called to configure the backend. Use the - // FromContext* methods to extract information from the context. - // This can be nil, in which case nothing will be called but the - // config will still be stored. - ConfigureFunc func(context.Context) error - - config *ResourceData -} - -var ( - backendConfigKey = contextKey("backend config") -) - -// FromContextBackendConfig extracts a ResourceData with the configuration -// from the context. This should only be called by Backend functions. -func FromContextBackendConfig(ctx context.Context) *ResourceData { - return ctx.Value(backendConfigKey).(*ResourceData) -} - -func (b *Backend) ConfigSchema() *configschema.Block { - // This is an alias of CoreConfigSchema just to implement the - // backend.Backend interface. - return b.CoreConfigSchema() -} - -func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - if b == nil { - return configVal, nil - } - var diags tfdiags.Diagnostics - var err error - - // In order to use Transform below, this needs to be filled out completely - // according the schema. - configVal, err = b.CoreConfigSchema().CoerceValue(configVal) - if err != nil { - return configVal, diags.Append(err) - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. 
- configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := b.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return configVal, diags - } - - shimRC := b.shimConfig(configVal) - warns, errs := schemaMap(b.Schema).Validate(shimRC) - for _, warn := range warns { - diags = diags.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - diags = diags.Append(err) - } - return configVal, diags -} - -func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { - if b == nil { - return nil - } - - var diags tfdiags.Diagnostics - sm := schemaMap(b.Schema) - shimRC := b.shimConfig(obj) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, shimRC, nil, nil, true) - if err != nil { - diags = diags.Append(err) - return diags - } - - data, err := sm.Data(nil, diff) - if err != nil { - diags = diags.Append(err) - return diags - } - b.config = data - - if b.ConfigureFunc != nil { - err = b.ConfigureFunc(context.WithValue( - context.Background(), backendConfigKey, data)) - if err != nil { - diags = diags.Append(err) - return diags - } - } - - return diags -} - -// shimConfig turns a new-style cty.Value configuration (which must be of -// an object type) into a minimal old-style *terraform.ResourceConfig object -// that should be populated enough to appease the not-yet-updated functionality -// in this package. This should be removed once everything is updated. -func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { - shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) - if !ok { - // If the configVal was nil, we still want a non-nil map here. - shimMap = map[string]interface{}{} - } - return &terraform.ResourceConfig{ - Config: shimMap, - Raw: shimMap, - } -} - -// Config returns the configuration. This is available after Configure is -// called. 
-func (b *Backend) Config() *ResourceData { - return b.config -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go deleted file mode 100644 index 9efc90e7..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go +++ /dev/null @@ -1,469 +0,0 @@ -package schema - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" -) - -var ReservedProviderFields = []string{ - "alias", - "version", -} - -// Provider represents a resource provider in Terraform, and properly -// implements all of the ResourceProvider API. -// -// By defining a schema for the configuration of the provider, the -// map of supporting resources, and a configuration function, the schema -// framework takes over and handles all the provider operations for you. -// -// After defining the provider structure, it is unlikely that you'll require any -// of the methods on Provider itself. -type Provider struct { - // Schema is the schema for the configuration of this provider. If this - // provider has no configuration, this can be omitted. - // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - Schema map[string]*Schema - - // ResourcesMap is the list of available resources that this provider - // can manage, along with their Resource structure defining their - // own schemas and CRUD operations. - // - // Provider automatically handles routing operations such as Apply, - // Diff, etc. to the proper resource. - ResourcesMap map[string]*Resource - - // DataSourcesMap is the collection of available data sources that - // this provider implements, with a Resource instance defining - // the schema and Read operation of each. - // - // Resource instances for data sources must have a Read function - // and must *not* implement Create, Update or Delete. - DataSourcesMap map[string]*Resource - - // ConfigureFunc is a function for configuring the provider. If the - // provider doesn't need to be configured, this can be omitted. - // - // See the ConfigureFunc documentation for more information. - ConfigureFunc ConfigureFunc - - // MetaReset is called by TestReset to reset any state stored in the meta - // interface. This is especially important if the StopContext is stored by - // the provider. - MetaReset func() error - - meta interface{} - - // a mutex is required because TestReset can directly replace the stopCtx - stopMu sync.Mutex - stopCtx context.Context - stopCtxCancel context.CancelFunc - stopOnce sync.Once - - TerraformVersion string -} - -// ConfigureFunc is the function used to configure a Provider. -// -// The interface{} value returned by this function is stored and passed into -// the subsequent resources as the meta parameter. This return value is -// usually used to pass along a configured API client, a configuration -// structure, etc. -type ConfigureFunc func(*ResourceData) (interface{}, error) - -// InternalValidate should be called to validate the structure -// of the provider. -// -// This should be called in a unit test for any provider to verify -// before release that a provider is properly configured for use with -// this library. 
-func (p *Provider) InternalValidate() error { - if p == nil { - return errors.New("provider is nil") - } - - var validationErrors error - sm := schemaMap(p.Schema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - - // Provider-specific checks - for k, _ := range sm { - if isReservedProviderFieldName(k) { - return fmt.Errorf("%s is a reserved field name for a provider", k) - } - } - - for k, r := range p.ResourcesMap { - if err := r.InternalValidate(nil, true); err != nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) - } - } - - for k, r := range p.DataSourcesMap { - if err := r.InternalValidate(nil, false); err != nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) - } - } - - return validationErrors -} - -func isReservedProviderFieldName(name string) bool { - for _, reservedName := range ReservedProviderFields { - if name == reservedName { - return true - } - } - return false -} - -// Meta returns the metadata associated with this provider that was -// returned by the Configure call. It will be nil until Configure is called. -func (p *Provider) Meta() interface{} { - return p.meta -} - -// SetMeta can be used to forcefully set the Meta object of the provider. -// Note that if Configure is called the return value will override anything -// set here. -func (p *Provider) SetMeta(v interface{}) { - p.meta = v -} - -// Stopped reports whether the provider has been stopped or not. -func (p *Provider) Stopped() bool { - ctx := p.StopContext() - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// StopCh returns a channel that is closed once the provider is stopped. -func (p *Provider) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - return p.stopCtx -} - -func (p *Provider) stopInit() { - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvider interface. -func (p *Provider) Stop() error { - p.stopOnce.Do(p.stopInit) - - p.stopMu.Lock() - defer p.stopMu.Unlock() - - p.stopCtxCancel() - return nil -} - -// TestReset resets any state stored in the Provider, and will call TestReset -// on Meta if it implements the TestProvider interface. -// This may be used to reset the schema.Provider at the start of a test, and is -// automatically called by resource.Test. -func (p *Provider) TestReset() error { - p.stopInit() - if p.MetaReset != nil { - return p.MetaReset() - } - return nil -} - -// GetSchema implementation of terraform.ResourceProvider interface -func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - resourceTypes := map[string]*configschema.Block{} - dataSources := map[string]*configschema.Block{} - - for _, name := range req.ResourceTypes { - if r, exists := p.ResourcesMap[name]; exists { - resourceTypes[name] = r.CoreConfigSchema() - } - } - for _, name := range req.DataSources { - if r, exists := p.DataSourcesMap[name]; exists { - dataSources[name] = r.CoreConfigSchema() - } - } - - return &terraform.ProviderSchema{ - Provider: schemaMap(p.Schema).CoreConfigSchema(), - ResourceTypes: resourceTypes, - DataSources: dataSources, - }, nil -} - -// Input implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return schemaMap(p.Schema).Input(input, c) -} - -// Validate implementation of terraform.ResourceProvider interface. -func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provider failed! This is always a bug\n"+ - "with the provider itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - return schemaMap(p.Schema).Validate(c) -} - -// ValidateResource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.ResourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support resource: %s", t)} - } - - return r.Validate(c) -} - -// Configure implementation of terraform.ResourceProvider interface. -func (p *Provider) Configure(c *terraform.ResourceConfig) error { - // No configuration - if p.ConfigureFunc == nil { - return nil - } - - sm := schemaMap(p.Schema) - - // Get a ResourceData for this configuration. To do this, we actually - // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c, nil, p.meta, true) - if err != nil { - return err - } - - data, err := sm.Data(nil, diff) - if err != nil { - return err - } - - meta, err := p.ConfigureFunc(data) - if err != nil { - return err - } - - p.meta = meta - return nil -} - -// Apply implementation of terraform.ResourceProvider interface. -func (p *Provider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Apply(s, d, p.meta) -} - -// Diff implementation of terraform.ResourceProvider interface. -func (p *Provider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Diff(s, c, p.meta) -} - -// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't -// attempt to calculate ignore_changes. -func (p *Provider) SimpleDiff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.simpleDiff(s, c, p.meta) -} - -// Refresh implementation of terraform.ResourceProvider interface. -func (p *Provider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - return r.Refresh(s, p.meta) -} - -// Resources implementation of terraform.ResourceProvider interface. 
-func (p *Provider) Resources() []terraform.ResourceType { - keys := make([]string, 0, len(p.ResourcesMap)) - for k := range p.ResourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.ResourceType, 0, len(keys)) - for _, k := range keys { - resource := p.ResourcesMap[k] - - // This isn't really possible (it'd fail InternalValidate), but - // we do it anyways to avoid a panic. - if resource == nil { - resource = &Resource{} - } - - result = append(result, terraform.ResourceType{ - Name: k, - Importable: resource.Importer != nil, - - // Indicates that a provider is compiled against a new enough - // version of core to support the GetSchema method. - SchemaAvailable: true, - }) - } - - return result -} - -func (p *Provider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - // Find the resource - r, ok := p.ResourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown resource type: %s", info.Type) - } - - // If it doesn't support import, error - if r.Importer == nil { - return nil, fmt.Errorf("resource %s doesn't support import", info.Type) - } - - // Create the data - data := r.Data(nil) - data.SetId(id) - data.SetType(info.Type) - - // Call the import function - results := []*ResourceData{data} - if r.Importer.State != nil { - var err error - results, err = r.Importer.State(data, p.meta) - if err != nil { - return nil, err - } - } - - // Convert the results to InstanceState values and return it - states := make([]*terraform.InstanceState, len(results)) - for i, r := range results { - states[i] = r.State() - } - - // Verify that all are non-nil. If there are any nil the error - // isn't obvious so we circumvent that with a friendlier error. - for _, s := range states { - if s == nil { - return nil, fmt.Errorf( - "nil entry in ImportState results. This is always a bug with\n" + - "the resource that is being imported. Please report this as\n" + - "a bug to Terraform.") - } - } - - return states, nil -} - -// ValidateDataSource implementation of terraform.ResourceProvider interface. -func (p *Provider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - r, ok := p.DataSourcesMap[t] - if !ok { - return nil, []error{fmt.Errorf( - "Provider doesn't support data source: %s", t)} - } - - return r.Validate(c) -} - -// ReadDataDiff implementation of terraform.ResourceProvider interface. -func (p *Provider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.Diff(nil, c, p.meta) -} - -// RefreshData implementation of terraform.ResourceProvider interface. -func (p *Provider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - - r, ok := p.DataSourcesMap[info.Type] - if !ok { - return nil, fmt.Errorf("unknown data source: %s", info.Type) - } - - return r.ReadDataApply(d, p.meta) -} - -// DataSources implementation of terraform.ResourceProvider interface. 
-func (p *Provider) DataSources() []terraform.DataSource { - keys := make([]string, 0, len(p.DataSourcesMap)) - for k, _ := range p.DataSourcesMap { - keys = append(keys, k) - } - sort.Strings(keys) - - result := make([]terraform.DataSource, 0, len(keys)) - for _, k := range keys { - result = append(result, terraform.DataSource{ - Name: k, - - // Indicates that a provider is compiled against a new enough - // version of core to support the GetSchema method. - SchemaAvailable: true, - }) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go deleted file mode 100644 index eee155bf..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go +++ /dev/null @@ -1,205 +0,0 @@ -package schema - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" -) - -// Provisioner represents a resource provisioner in Terraform and properly -// implements all of the ResourceProvisioner API. -// -// This higher level structure makes it much easier to implement a new or -// custom provisioner for Terraform. -// -// The function callbacks for this structure are all passed a context object. -// This context object has a number of pre-defined values that can be accessed -// via the global functions defined in context.go. -type Provisioner struct { - // ConnSchema is the schema for the connection settings for this - // provisioner. - // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - // - // NOTE: The value of connection keys can only be strings for now. - ConnSchema map[string]*Schema - - // Schema is the schema for the usage of this provisioner. - // - // The keys of this map are the configuration keys, and the value is - // the schema describing the value of the configuration. - Schema map[string]*Schema - - // ApplyFunc is the function for executing the provisioner. This is required. - // It is given a context. See the Provisioner struct docs for more - // information. - ApplyFunc func(ctx context.Context) error - - // ValidateFunc is a function for extended validation. This is optional - // and should be used when individual field validation is not enough. - ValidateFunc func(*terraform.ResourceConfig) ([]string, []error) - - stopCtx context.Context - stopCtxCancel context.CancelFunc - stopOnce sync.Once -} - -// Keys that can be used to access data in the context parameters for -// Provisioners. -var ( - connDataInvalid = contextKey("data invalid") - - // This returns a *ResourceData for the connection information. - // Guaranteed to never be nil. - ProvConnDataKey = contextKey("provider conn data") - - // This returns a *ResourceData for the config information. - // Guaranteed to never be nil. - ProvConfigDataKey = contextKey("provider config data") - - // This returns a terraform.UIOutput. Guaranteed to never be nil. - ProvOutputKey = contextKey("provider output") - - // This returns the raw InstanceState passed to Apply. Guaranteed to - // be set, but may be nil. - ProvRawStateKey = contextKey("provider raw state") -) - -// InternalValidate should be called to validate the structure -// of the provisioner. -// -// This should be called in a unit test to verify before release that this -// structure is properly configured for use. 
-func (p *Provisioner) InternalValidate() error { - if p == nil { - return errors.New("provisioner is nil") - } - - var validationErrors error - { - sm := schemaMap(p.ConnSchema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - } - - { - sm := schemaMap(p.Schema) - if err := sm.InternalValidate(sm); err != nil { - validationErrors = multierror.Append(validationErrors, err) - } - } - - if p.ApplyFunc == nil { - validationErrors = multierror.Append(validationErrors, fmt.Errorf( - "ApplyFunc must not be nil")) - } - - return validationErrors -} - -// StopContext returns a context that checks whether a provisioner is stopped. -func (p *Provisioner) StopContext() context.Context { - p.stopOnce.Do(p.stopInit) - return p.stopCtx -} - -func (p *Provisioner) stopInit() { - p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) -} - -// Stop implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) Stop() error { - p.stopOnce.Do(p.stopInit) - p.stopCtxCancel() - return nil -} - -// GetConfigSchema implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) { - return schemaMap(p.Schema).CoreConfigSchema(), nil -} - -// Apply implementation of terraform.ResourceProvisioner interface. -func (p *Provisioner) Apply( - o terraform.UIOutput, - s *terraform.InstanceState, - c *terraform.ResourceConfig) error { - var connData, configData *ResourceData - - { - // We first need to turn the connection information into a - // terraform.ResourceConfig so that we can use that type to more - // easily build a ResourceData structure. We do this by simply treating - // the conn info as configuration input. - raw := make(map[string]interface{}) - if s != nil { - for k, v := range s.Ephemeral.ConnInfo { - raw[k] = v - } - } - - c := terraform.NewResourceConfigRaw(raw) - sm := schemaMap(p.ConnSchema) - diff, err := sm.Diff(nil, c, nil, nil, true) - if err != nil { - return err - } - connData, err = sm.Data(nil, diff) - if err != nil { - return err - } - } - - { - // Build the configuration data. Doing this requires making a "diff" - // even though that's never used. We use that just to get the correct types. - configMap := schemaMap(p.Schema) - diff, err := configMap.Diff(nil, c, nil, nil, true) - if err != nil { - return err - } - configData, err = configMap.Data(nil, diff) - if err != nil { - return err - } - } - - // Build the context and call the function - ctx := p.StopContext() - ctx = context.WithValue(ctx, ProvConnDataKey, connData) - ctx = context.WithValue(ctx, ProvConfigDataKey, configData) - ctx = context.WithValue(ctx, ProvOutputKey, o) - ctx = context.WithValue(ctx, ProvRawStateKey, s) - return p.ApplyFunc(ctx) -} - -// Validate implements the terraform.ResourceProvisioner interface. -func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { - if err := p.InternalValidate(); err != nil { - return nil, []error{fmt.Errorf( - "Internal validation of the provisioner failed! This is always a bug\n"+ - "with the provisioner itself, and not a user issue. Please report\n"+ - "this bug:\n\n%s", err)} - } - - if p.Schema != nil { - w, e := schemaMap(p.Schema).Validate(c) - ws = append(ws, w...) - es = append(es, e...) - } - - if p.ValidateFunc != nil { - w, e := p.ValidateFunc(c) - ws = append(ws, w...) - es = append(es, e...) 
- } - - return ws, es -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go deleted file mode 100644 index 8cd2703a..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go +++ /dev/null @@ -1,831 +0,0 @@ -package schema - -import ( - "errors" - "fmt" - "log" - "strconv" - - "github.com/hashicorp/terraform/terraform" - "github.com/zclconf/go-cty/cty" -) - -var ReservedDataSourceFields = []string{ - "connection", - "count", - "depends_on", - "lifecycle", - "provider", - "provisioner", -} - -var ReservedResourceFields = []string{ - "connection", - "count", - "depends_on", - "id", - "lifecycle", - "provider", - "provisioner", -} - -// Resource represents a thing in Terraform that has a set of configurable -// attributes and a lifecycle (create, read, update, delete). -// -// The Resource schema is an abstraction that allows provider writers to -// worry only about CRUD operations while off-loading validation, diff -// generation, etc. to this higher level library. -// -// In spite of the name, this struct is not used only for terraform resources, -// but also for data sources. In the case of data sources, the Create, -// Update and Delete functions must not be provided. -type Resource struct { - // Schema is the schema for the configuration of this resource. - // - // The keys of this map are the configuration keys, and the values - // describe the schema of the configuration value. - // - // The schema is used to represent both configurable data as well - // as data that might be computed in the process of creating this - // resource. - Schema map[string]*Schema - - // SchemaVersion is the version number for this resource's Schema - // definition. The current SchemaVersion stored in the state for each - // resource. Provider authors can increment this version number - // when Schema semantics change. If the State's SchemaVersion is less than - // the current SchemaVersion, the InstanceState is yielded to the - // MigrateState callback, where the provider can make whatever changes it - // needs to update the state to be compatible to the latest version of the - // Schema. - // - // When unset, SchemaVersion defaults to 0, so provider authors can start - // their Versioning at any integer >= 1 - SchemaVersion int - - // MigrateState is deprecated and any new changes to a resource's schema - // should be handled by StateUpgraders. Existing MigrateState implementations - // should remain for compatibility with existing state. MigrateState will - // still be called if the stored SchemaVersion is less than the - // first version of the StateUpgraders. - // - // MigrateState is responsible for updating an InstanceState with an old - // version to the format expected by the current version of the Schema. - // - // It is called during Refresh if the State's stored SchemaVersion is less - // than the current SchemaVersion of the Resource. - // - // The function is yielded the state's stored SchemaVersion and a pointer to - // the InstanceState that needs updating, as well as the configured - // provider's configured meta interface{}, in case the migration process - // needs to make any remote API calls. - MigrateState StateMigrateFunc - - // StateUpgraders contains the functions responsible for upgrading an - // existing state with an old schema version to a newer schema. 
It is - // called specifically by Terraform when the stored schema version is less - // than the current SchemaVersion of the Resource. - // - // StateUpgraders map specific schema versions to a StateUpgrader - // function. The registered versions are expected to be ordered, - // consecutive values. The initial value may be greater than 0 to account - // for legacy schemas that weren't recorded and can be handled by - // MigrateState. - StateUpgraders []StateUpgrader - - // The functions below are the CRUD operations for this resource. - // - // The only optional operation is Update. If Update is not implemented, - // then updates will not be supported for this resource. - // - // The ResourceData parameter in the functions below are used to - // query configuration and changes for the resource as well as to set - // the ID, computed data, etc. - // - // The interface{} parameter is the result of the ConfigureFunc in - // the provider for this resource. If the provider does not define - // a ConfigureFunc, this will be nil. This parameter should be used - // to store API clients, configuration structures, etc. - // - // If any errors occur during each of the operation, an error should be - // returned. If a resource was partially updated, be careful to enable - // partial state mode for ResourceData and use it accordingly. - // - // Exists is a function that is called to check if a resource still - // exists. If this returns false, then this will affect the diff - // accordingly. If this function isn't set, it will not be called. You - // can also signal existence in the Read method by calling d.SetId("") - // if the Resource is no longer present and should be removed from state. - // The *ResourceData passed to Exists should _not_ be modified. - Create CreateFunc - Read ReadFunc - Update UpdateFunc - Delete DeleteFunc - Exists ExistsFunc - - // CustomizeDiff is a custom function for working with the diff that - // Terraform has created for this resource - it can be used to customize the - // diff that has been created, diff values not controlled by configuration, - // or even veto the diff altogether and abort the plan. It is passed a - // *ResourceDiff, a structure similar to ResourceData but lacking most write - // functions like Set, while introducing new functions that work with the - // diff such as SetNew, SetNewComputed, and ForceNew. - // - // The phases Terraform runs this in, and the state available via functions - // like Get and GetChange, are as follows: - // - // * New resource: One run with no state - // * Existing resource: One run with state - // * Existing resource, forced new: One run with state (before ForceNew), - // then one run without state (as if new resource) - // * Tainted resource: No runs (custom diff logic is skipped) - // * Destroy: No runs (standard diff logic is skipped on destroy diffs) - // - // This function needs to be resilient to support all scenarios. - // - // If this function needs to access external API resources, remember to flag - // the RequiresRefresh attribute mentioned below to ensure that - // -refresh=false is blocked when running plan or apply, as this means that - // this resource requires refresh-like behaviour to work effectively. - // - // For the most part, only computed fields can be customized by this - // function. - // - // This function is only allowed on regular resources (not data sources). - CustomizeDiff CustomizeDiffFunc - - // Importer is the ResourceImporter implementation for this resource. 
- // If this is nil, then this resource does not support importing. If - // this is non-nil, then it supports importing and ResourceImporter - // must be validated. The validity of ResourceImporter is verified - // by InternalValidate on Resource. - Importer *ResourceImporter - - // If non-empty, this string is emitted as a warning during Validate. - DeprecationMessage string - - // Timeouts allow users to specify specific time durations in which an - // operation should time out, to allow them to extend an action to suit their - // usage. For example, a user may specify a large Creation timeout for their - // AWS RDS Instance due to its size, or restoring from a snapshot. - // Resource implementors must enable Timeout support by adding the allowed - // actions (Create, Read, Update, Delete, Default) to the Resource struct, and - // accessing them in the matching methods. - Timeouts *ResourceTimeout -} - -// ShimInstanceStateFromValue converts a cty.Value to a -// terraform.InstanceState. -func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { - // Get the raw shimmed value. While this is correct, the set hashes don't - // match those from the Schema. - s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) - - // We now rebuild the state through the ResourceData, so that the set indexes - // match what helper/schema expects. - data, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - return nil, err - } - - s = data.State() - if s == nil { - s = &terraform.InstanceState{} - } - return s, nil -} - -// See Resource documentation. -type CreateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ReadFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type UpdateFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type DeleteFunc func(*ResourceData, interface{}) error - -// See Resource documentation. -type ExistsFunc func(*ResourceData, interface{}) (bool, error) - -// See Resource documentation. -type StateMigrateFunc func( - int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) - -type StateUpgrader struct { - // Version is the version schema that this Upgrader will handle, converting - // it to Version+1. - Version int - - // Type describes the schema that this function can upgrade. Type is - // required to decode the schema if the state was stored in a legacy - // flatmap format. - Type cty.Type - - // Upgrade takes the JSON encoded state and the provider meta value, and - // upgrades the state one single schema version. The provided state is - // decoded into the default json types using a map[string]interface{}. It - // is up to the StateUpgradeFunc to ensure that the returned value can be - // encoded using the new schema. - Upgrade StateUpgradeFunc -} - -// See StateUpgrader -type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) - -// See Resource documentation. -type CustomizeDiffFunc func(*ResourceDiff, interface{}) error - -// Apply creates, updates, and/or deletes a resource.
-func (r *Resource) Apply( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - data, err := schemaMap(r.Schema).Data(s, d) - if err != nil { - return s, err - } - - // Instance Diff shoould have the timeout info, need to copy it over to the - // ResourceData meta - rt := ResourceTimeout{} - if _, ok := d.Meta[TimeoutKey]; ok { - if err := rt.DiffDecode(d); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } else if s != nil { - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - } else { - log.Printf("[DEBUG] No meta timeoutkey found in Apply()") - } - data.timeouts = &rt - - if s == nil { - // The Terraform API dictates that this should never happen, but - // it doesn't hurt to be safe in this case. - s = new(terraform.InstanceState) - } - - if d.Destroy || d.RequiresNew() { - if s.ID != "" { - // Destroy the resource since it is created - if err := r.Delete(data, meta); err != nil { - return r.recordCurrentSchemaVersion(data.State()), err - } - - // Make sure the ID is gone. - data.SetId("") - } - - // If we're only destroying, and not creating, then return - // now since we're done! - if !d.RequiresNew() { - return nil, nil - } - - // Reset the data to be stateless since we just destroyed - data, err = schemaMap(r.Schema).Data(nil, d) - // data was reset, need to re-apply the parsed timeouts - data.timeouts = &rt - if err != nil { - return nil, err - } - } - - err = nil - if data.Id() == "" { - // We're creating, it is a new resource. - data.MarkNewResource() - err = r.Create(data, meta) - } else { - if r.Update == nil { - return s, fmt.Errorf("doesn't support update") - } - - err = r.Update(data, meta) - } - - return r.recordCurrentSchemaVersion(data.State()), err -} - -// Diff returns a diff of this resource. -func (r *Resource) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - t := &ResourceTimeout{} - err := t.ConfigDecode(r, c) - - if err != nil { - return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) - } - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) - if err != nil { - return instanceDiff, err - } - - if instanceDiff != nil { - if err := t.DiffEncode(instanceDiff); err != nil { - log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) - } - } else { - log.Printf("[DEBUG] Instance Diff is nil in Diff()") - } - - return instanceDiff, err -} - -func (r *Resource) simpleDiff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) - if err != nil { - return instanceDiff, err - } - - if instanceDiff == nil { - instanceDiff = terraform.NewInstanceDiff() - } - - // Make sure the old value is set in each of the instance diffs. - // This was done by the RequiresNew logic in the full legacy Diff. - for k, attr := range instanceDiff.Attributes { - if attr == nil { - continue - } - if s != nil { - attr.Old = s.Attributes[k] - } - } - - return instanceDiff, nil -} - -// Validate validates the resource configuration against the schema. 
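The timeout decoding in `Apply` and `Diff` above only matters once a resource actually declares `Timeouts`. A minimal sketch, assuming a hypothetical resource and arbitrary durations:

```go
package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

// Declaring Timeouts on the Resource is what makes the practitioner-facing
// `timeouts` block available; the CRUD functions can then read the effective
// value via d.Timeout(schema.TimeoutCreate) and friends.
func resourceWithTimeouts() *schema.Resource {
	create := 30 * time.Minute
	destroy := 15 * time.Minute

	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: &create,
			Delete: &destroy,
		},
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
		Create: schema.Noop,
		Read:   schema.Noop,
		Delete: schema.RemoveFromState,
	}
}
```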
-func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { - warns, errs := schemaMap(r.Schema).Validate(c) - - if r.DeprecationMessage != "" { - warns = append(warns, r.DeprecationMessage) - } - - return warns, errs -} - -// ReadDataApply loads the data for a data source, given a diff that -// describes the configuration arguments and desired computed attributes. -func (r *Resource) ReadDataApply( - d *terraform.InstanceDiff, - meta interface{}, -) (*terraform.InstanceState, error) { - // Data sources are always built completely from scratch - // on each read, so the source state is always nil. - data, err := schemaMap(r.Schema).Data(nil, d) - if err != nil { - return nil, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - // Data sources can set an ID if they want, but they aren't - // required to; we'll provide a placeholder if they don't, - // to preserve the invariant that all resources have non-empty - // ids. - state.ID = "-" - } - - return r.recordCurrentSchemaVersion(state), err -} - -// RefreshWithoutUpgrade reads the instance state, but does not call -// MigrateState or the StateUpgraders, since those are now invoked in a -// separate API call. -// RefreshWithoutUpgrade is part of the new plugin shims. -func (r *Resource) RefreshWithoutUpgrade( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - // If the ID is already somehow blank, it doesn't exist - if s.ID == "" { - return nil, nil - } - - rt := ResourceTimeout{} - if _, ok := s.Meta[TimeoutKey]; ok { - if err := rt.StateDecode(s); err != nil { - log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) - } - } - - if r.Exists != nil { - // Make a copy of data so that if it is modified it doesn't - // affect our Read later. 
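The refresh path above shows the two ways a provider can report a vanished remote object: an optional `Exists` function, or `d.SetId("")` inside `Read`. A sketch of both, with `lookupWidget` standing in for a hypothetical API client call:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// lookupWidget stands in for the provider's real API lookup.
func lookupWidget(meta interface{}, id string) (found bool, name string, err error) {
	// ... a real provider would call its client here ...
	return false, "", nil
}

func exampleExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	found, _, err := lookupWidget(meta, d.Id())
	return found, err // false with a nil error removes the resource from state
}

func exampleRead(d *schema.ResourceData, meta interface{}) error {
	found, name, err := lookupWidget(meta, d.Id())
	if err != nil {
		return err
	}
	if !found {
		d.SetId("") // equivalent signal when no Exists function is defined
		return nil
	}
	return d.Set("name", name)
}
```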
- data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - - if err != nil { - return s, err - } - - exists, err := r.Exists(data, meta) - if err != nil { - return s, err - } - if !exists { - return nil, nil - } - } - - // there may be new StateUpgraders that need to be run - s, err := r.upgradeState(s, meta) - if err != nil { - return s, err - } - - data, err := schemaMap(r.Schema).Data(s, nil) - data.timeouts = &rt - if err != nil { - return s, err - } - - err = r.Read(data, meta) - state := data.State() - if state != nil && state.ID == "" { - state = nil - } - - return r.recordCurrentSchemaVersion(state), err -} - -func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - var err error - - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - migrate := needsMigration && r.MigrateState != nil - - if migrate { - s, err = r.MigrateState(stateSchemaVersion, s, meta) - if err != nil { - return s, err - } - } - - if len(r.StateUpgraders) == 0 { - return s, nil - } - - // If we ran MigrateState, then the stateSchemaVersion value is no longer - // correct. We can expect the first upgrade function to be the correct - // schema type version. - if migrate { - stateSchemaVersion = r.StateUpgraders[0].Version - } - - schemaType := r.CoreConfigSchema().ImpliedType() - // find the expected type to convert the state - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion == upgrader.Version { - schemaType = upgrader.Type - } - } - - // StateUpgraders only operate on the new JSON format state, so the state - // need to be converted. - stateVal, err := StateValueFromInstanceState(s, schemaType) - if err != nil { - return nil, err - } - - jsonState, err := StateValueToJSONMap(stateVal, schemaType) - if err != nil { - return nil, err - } - - for _, upgrader := range r.StateUpgraders { - if stateSchemaVersion != upgrader.Version { - continue - } - - jsonState, err = upgrader.Upgrade(jsonState, meta) - if err != nil { - return nil, err - } - stateSchemaVersion++ - } - - // now we need to re-flatmap the new state - stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) - if err != nil { - return nil, err - } - - return r.ShimInstanceStateFromValue(stateVal) -} - -// InternalValidate should be called to validate the structure -// of the resource. -// -// This should be called in a unit test for any resource to verify -// before release that a resource is properly configured for use with -// this library. -// -// Provider.InternalValidate() will automatically call this for all of -// the resources it manages, so you don't need to call this manually if it -// is part of a Provider. 
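The comment above recommends exercising `InternalValidate` from a unit test; in practice that usually happens through the provider wrapper, which validates every registered resource. A sketch for a `_test.go` file, assuming the `schema.Provider` wrapper from the same package and a hypothetical `example_widget` resource:

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
)

// Provider is a hypothetical provider constructor used only for this sketch.
func Provider() *schema.Provider {
	return &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"example_widget": {
				Schema: map[string]*schema.Schema{
					"name": {Type: schema.TypeString, Required: true, ForceNew: true},
				},
				Create: schema.Noop,
				Read:   schema.Noop,
				Delete: schema.RemoveFromState,
			},
		},
	}
}

// InternalValidate is called for every resource the provider manages.
func TestProviderInternalValidate(t *testing.T) {
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("provider schema failed validation: %s", err)
	}
}
```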
-func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { - if r == nil { - return errors.New("resource is nil") - } - - if !writable { - if r.Create != nil || r.Update != nil || r.Delete != nil { - return fmt.Errorf("must not implement Create, Update or Delete") - } - - // CustomizeDiff cannot be defined for read-only resources - if r.CustomizeDiff != nil { - return fmt.Errorf("cannot implement CustomizeDiff") - } - } - - tsm := topSchemaMap - - if r.isTopLevel() && writable { - // All non-Computed attributes must be ForceNew if Update is not defined - if r.Update == nil { - nonForceNewAttrs := make([]string, 0) - for k, v := range r.Schema { - if !v.ForceNew && !v.Computed { - nonForceNewAttrs = append(nonForceNewAttrs, k) - } - } - if len(nonForceNewAttrs) > 0 { - return fmt.Errorf( - "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) - } - } else { - nonUpdateableAttrs := make([]string, 0) - for k, v := range r.Schema { - if v.ForceNew || v.Computed && !v.Optional { - nonUpdateableAttrs = append(nonUpdateableAttrs, k) - } - } - updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) - if updateableAttrs == 0 { - return fmt.Errorf( - "All fields are ForceNew or Computed w/out Optional, Update is superfluous") - } - } - - tsm = schemaMap(r.Schema) - - // Destroy, and Read are required - if r.Read == nil { - return fmt.Errorf("Read must be implemented") - } - if r.Delete == nil { - return fmt.Errorf("Delete must be implemented") - } - - // If we have an importer, we need to verify the importer. - if r.Importer != nil { - if err := r.Importer.InternalValidate(); err != nil { - return err - } - } - - for k, f := range tsm { - if isReservedResourceFieldName(k, f) { - return fmt.Errorf("%s is a reserved field name", k) - } - } - } - - lastVersion := -1 - for _, u := range r.StateUpgraders { - if lastVersion >= 0 && u.Version-lastVersion > 1 { - return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) - } - - if u.Version >= r.SchemaVersion { - return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) - } - - if !u.Type.IsObjectType() { - return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) - } - - if u.Upgrade == nil { - return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) - } - - lastVersion = u.Version - } - - if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { - return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) - } - - // Data source - if r.isTopLevel() && !writable { - tsm = schemaMap(r.Schema) - for k, _ := range tsm { - if isReservedDataSourceFieldName(k) { - return fmt.Errorf("%s is a reserved field name", k) - } - } - } - - return schemaMap(r.Schema).InternalValidate(tsm) -} - -func isReservedDataSourceFieldName(name string) bool { - for _, reservedName := range ReservedDataSourceFields { - if name == reservedName { - return true - } - } - return false -} - -func isReservedResourceFieldName(name string, s *Schema) bool { - // Allow phasing out "id" - // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 - if name == "id" && (s.Deprecated != "" || s.Removed != "") { - return false - } - - for _, reservedName := range ReservedResourceFields { - if name == reservedName { - return true - } - } - return false -} - -// Data returns a ResourceData struct for this Resource. 
Each return value -// is a separate copy and can be safely modified differently. -// -// The data returned from this function has no actual affect on the Resource -// itself (including the state given to this function). -// -// This function is useful for unit tests and ResourceImporter functions. -func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { - result, err := schemaMap(r.Schema).Data(s, nil) - if err != nil { - // At the time of writing, this isn't possible (Data never returns - // non-nil errors). We panic to find this in the future if we have to. - // I don't see a reason for Data to ever return an error. - panic(err) - } - - // load the Resource timeouts - result.timeouts = r.Timeouts - if result.timeouts == nil { - result.timeouts = &ResourceTimeout{} - } - - // Set the schema version to latest by default - result.meta = map[string]interface{}{ - "schema_version": strconv.Itoa(r.SchemaVersion), - } - - return result -} - -// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing -// -// TODO: May be able to be removed with the above ResourceData function. -func (r *Resource) TestResourceData() *ResourceData { - return &ResourceData{ - schema: r.Schema, - } -} - -// SchemasForFlatmapPath tries its best to find a sequence of schemas that -// the given dot-delimited attribute path traverses through in the schema -// of the receiving Resource. -func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { - return SchemasForFlatmapPath(path, r.Schema) -} - -// Returns true if the resource is "top level" i.e. not a sub-resource. -func (r *Resource) isTopLevel() bool { - // TODO: This is a heuristic; replace with a definitive attribute? - return (r.Create != nil || r.Read != nil) -} - -// Determines if a given InstanceState needs to be migrated by checking the -// stored version number with the current SchemaVersion -func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { - // Get the raw interface{} value for the schema version. If it doesn't - // exist or is nil then set it to zero. - raw := is.Meta["schema_version"] - if raw == nil { - raw = "0" - } - - // Try to convert it to a string. If it isn't a string then we pretend - // that it isn't set at all. It should never not be a string unless it - // was manually tampered with. - rawString, ok := raw.(string) - if !ok { - rawString = "0" - } - - stateSchemaVersion, _ := strconv.Atoi(rawString) - - // Don't run MigrateState if the version is handled by a StateUpgrader, - // since StateMigrateFuncs are not required to handle unknown versions - maxVersion := r.SchemaVersion - if len(r.StateUpgraders) > 0 { - maxVersion = r.StateUpgraders[0].Version - } - - return stateSchemaVersion < maxVersion, stateSchemaVersion -} - -func (r *Resource) recordCurrentSchemaVersion( - state *terraform.InstanceState) *terraform.InstanceState { - if state != nil && r.SchemaVersion > 0 { - if state.Meta == nil { - state.Meta = make(map[string]interface{}) - } - state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) - } - return state -} - -// Noop is a convenience implementation of resource function which takes -// no action and returns no error. -func Noop(*ResourceData, interface{}) error { - return nil -} - -// RemoveFromState is a convenience implementation of a resource function -// which sets the resource ID to empty string (to remove it from state) -// and returns no error. 
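The unit-testing use case mentioned above for `Data`/`TestResourceData` typically looks like the sketch below: a `ResourceData` backed only by the schema, used to exercise helpers without a plan/apply cycle. The schema and values are illustrative.

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
)

func TestTagsRoundTrip(t *testing.T) {
	r := &schema.Resource{
		Schema: map[string]*schema.Schema{
			"tags": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}

	// TestResourceData yields a ResourceData with no state or diff attached.
	d := r.TestResourceData()
	if err := d.Set("tags", map[string]interface{}{"team": "enablement"}); err != nil {
		t.Fatal(err)
	}

	got := d.Get("tags").(map[string]interface{})
	if got["team"] != "enablement" {
		t.Fatalf("unexpected tags: %#v", got)
	}
}
```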
-func RemoveFromState(d *ResourceData, _ interface{}) error { - d.SetId("") - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go deleted file mode 100644 index 5dada3ca..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go +++ /dev/null @@ -1,52 +0,0 @@ -package schema - -// ResourceImporter defines how a resource is imported in Terraform. This -// can be set onto a Resource struct to make it Importable. Not all resources -// have to be importable; if a Resource doesn't have a ResourceImporter then -// it won't be importable. -// -// "Importing" in Terraform is the process of taking an already-created -// resource and bringing it under Terraform management. This can include -// updating Terraform state, generating Terraform configuration, etc. -type ResourceImporter struct { - // The functions below must all be implemented for importing to work. - - // State is called to convert an ID to one or more InstanceState to - // insert into the Terraform state. If this isn't specified, then - // the ID is passed straight through. - State StateFunc -} - -// StateFunc is the function called to import a resource into the -// Terraform state. It is given a ResourceData with only ID set. This -// ID is going to be an arbitrary value given by the user and may not map -// directly to the ID format that the resource expects, so that should -// be validated. -// -// This should return a slice of ResourceData that turn into the state -// that was imported. This might be as simple as returning only the argument -// that was given to the function. In other cases (such as AWS security groups), -// an import may fan out to multiple resources and this will have to return -// multiple. -// -// To create the ResourceData structures for other resource types (if -// you have to), instantiate your resource and call the Data function. -type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) - -// InternalValidate should be called to validate the structure of this -// importer. This should be called in a unit test. -// -// Resource.InternalValidate() will automatically call this, so this doesn't -// need to be called manually. Further, Resource.InternalValidate() is -// automatically called by Provider.InternalValidate(), so you only need -// to internal validate the provider. -func (r *ResourceImporter) InternalValidate() error { - return nil -} - -// ImportStatePassthrough is an implementation of StateFunc that can be -// used to simply pass the ID directly through. This should be used only -// in the case that an ID-only refresh is possible. -func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { - return []*ResourceData{d}, nil -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go deleted file mode 100644 index 089e6b21..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ /dev/null @@ -1,1854 +0,0 @@ -// schema is a high-level framework for easily writing new providers -// for Terraform. Usage of schema is recommended over attempting to write -// to the low-level plugin interfaces manually. -// -// schema breaks down provider creation into simple CRUD operations for -// resources. The logic of diffing, destroying before creating, updating -// or creating, etc. is all handled by the framework. 
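The importer contract documented above reduces, for most resources whose ID alone is enough to perform a full `Read`, to wiring in `ImportStatePassthrough`. A minimal sketch with a hypothetical resource:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// ImportStatePassthrough hands the user-supplied ID straight through and lets
// the normal refresh populate the remaining attributes.
func resourceImportableWidget() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
		Create: schema.Noop,
		Read:   schema.Noop,
		Delete: schema.RemoveFromState,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
	}
}
```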
The plugin author -// only needs to implement a configuration schema and the CRUD operations and -// everything else is meant to just work. -// -// A good starting point is to view the Provider structure. -package schema - -import ( - "context" - "fmt" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/copystructure" - "github.com/mitchellh/mapstructure" -) - -// Name of ENV variable which (if not empty) prefers panic over error -const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" - -// type used for schema package context keys -type contextKey string - -var ( - protoVersionMu sync.Mutex - protoVersion5 = false -) - -func isProto5() bool { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - return protoVersion5 - -} - -// SetProto5 enables a feature flag for any internal changes required required -// to work with the new plugin protocol. This should not be called by -// provider. -func SetProto5() { - protoVersionMu.Lock() - defer protoVersionMu.Unlock() - protoVersion5 = true -} - -// Schema is used to describe the structure of a value. -// -// Read the documentation of the struct elements for important details. -type Schema struct { - // Type is the type of the value and must be one of the ValueType values. - // - // This type not only determines what type is expected/valid in configuring - // this value, but also what type is returned when ResourceData.Get is - // called. The types returned by Get are: - // - // TypeBool - bool - // TypeInt - int - // TypeFloat - float64 - // TypeString - string - // TypeList - []interface{} - // TypeMap - map[string]interface{} - // TypeSet - *schema.Set - // - Type ValueType - - // ConfigMode allows for overriding the default behaviors for mapping - // schema entries onto configuration constructs. - // - // By default, the Elem field is used to choose whether a particular - // schema is represented in configuration as an attribute or as a nested - // block; if Elem is a *schema.Resource then it's a block and it's an - // attribute otherwise. - // - // If Elem is *schema.Resource then setting ConfigMode to - // SchemaConfigModeAttr will force it to be represented in configuration - // as an attribute, which means that the Computed flag can be used to - // provide default elements when the argument isn't set at all, while still - // allowing the user to force zero elements by explicitly assigning an - // empty list. - // - // When Computed is set without Optional, the attribute is not settable - // in configuration at all and so SchemaConfigModeAttr is the automatic - // behavior, and SchemaConfigModeBlock is not permitted. - ConfigMode SchemaConfigMode - - // If one of these is set, then this item can come from the configuration. - // Both cannot be set. If Optional is set, the value is optional. If - // Required is set, the value is required. - // - // One of these must be set if the value is not computed. That is: - // value either comes from the config, is computed, or is both. - Optional bool - Required bool - - // If this is non-nil, the provided function will be used during diff - // of this field. If this is nil, a default diff for the type of the - // schema will be used. - // - // This allows comparison based on something other than primitive, list - // or map equality - for example SSH public keys may be considered - // equivalent regardless of trailing whitespace. 
- DiffSuppressFunc SchemaDiffSuppressFunc - - // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration. - // - // DefaultFunc can be specified to compute a dynamic default. - // Only one of Default or DefaultFunc can be set. If DefaultFunc is - // used then its return value should be stable to avoid generating - // confusing/perpetual diffs. - // - // Changing either Default or the return value of DefaultFunc can be - // a breaking change, especially if the attribute in question has - // ForceNew set. If a default needs to change to align with changing - // assumptions in an upstream API then it may be necessary to also use - // the MigrateState function on the resource to change the state to match, - // or have the Read function adjust the state value to align with the - // new default. - // - // If Required is true above, then Default cannot be set. DefaultFunc - // can be set with Required. If the DefaultFunc returns nil, then there - // will be no default and the user will be asked to fill it in. - // - // If either of these is set, then the user won't be asked for input - // for this key if the default is not nil. - Default interface{} - DefaultFunc SchemaDefaultFunc - - // Description is used as the description for docs or asking for user - // input. It should be relatively short (a few sentences max) and should - // be formatted to fit a CLI. - Description string - - // InputDefault is the default value to use for when inputs are requested. - // This differs from Default in that if Default is set, no input is - // asked for. If Input is asked, this will be the default value offered. - InputDefault string - - // The fields below relate to diffs. - // - // If Computed is true, then the result of this value is computed - // (unless specified by config) on creation. - // - // If ForceNew is true, then a change in this resource necessitates - // the creation of a new resource. - // - // StateFunc is a function called to change the value of this before - // storing it in the state (and likewise before comparing for diffs). - // The use for this is for example with large strings, you may want - // to simply store the hash of it. - Computed bool - ForceNew bool - StateFunc SchemaStateFunc - - // The following fields are only set for a TypeList, TypeSet, or TypeMap. - // - // Elem represents the element type. For a TypeMap, it must be a *Schema - // with a Type that is one of the primitives: TypeString, TypeBool, - // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a - // *Resource. If it is *Schema, the element type is just a simple value. - // If it is *Resource, the element type is a complex structure, - // potentially managed via its own CRUD actions on the API. - Elem interface{} - - // The following fields are only set for a TypeList or TypeSet. - // - // MaxItems defines a maximum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however more than one instance would - // cause instability. - // - // MinItems defines a minimum amount of items that can exist within a - // TypeSet or TypeList. Specific use cases would be if a TypeSet is being - // used to wrap a complex structure, however less than one instance would - // cause instability. - // - // If the field Optional is set to true then MinItems is ignored and thus - // effectively zero. 
- MaxItems int - MinItems int - - // PromoteSingle originally allowed for a single element to be assigned - // where a primitive list was expected, but this no longer works from - // Terraform v0.12 onwards (Terraform Core will require a list to be set - // regardless of what this is set to) and so only applies to Terraform v0.11 - // and earlier, and so should be used only to retain this functionality - // for those still using v0.11 with a provider that formerly used this. - PromoteSingle bool - - // The following fields are only valid for a TypeSet type. - // - // Set defines a function to determine the unique ID of an item so that - // a proper set can be built. - Set SchemaSetFunc - - // ComputedWhen is a set of queries on the configuration. Whenever any - // of these things is changed, it will require a recompute (this requires - // that Computed is set to true). - // - // NOTE: This currently does not work. - ComputedWhen []string - - // ConflictsWith is a set of schema keys that conflict with this schema. - // This will only check that they're set in the _config_. This will not - // raise an error for a malfunctioning resource that sets a conflicting - // key. - ConflictsWith []string - - // When Deprecated is set, this attribute is deprecated. - // - // A deprecated field still works, but will probably stop working in near - // future. This string is the message shown to the user with instructions on - // how to address the deprecation. - Deprecated string - - // When Removed is set, this attribute has been removed from the schema - // - // Removed attributes can be left in the Schema to generate informative error - // messages for the user when they show up in resource configurations. - // This string is the message shown to the user with instructions on - // what do to about the removed attribute. - Removed string - - // ValidateFunc allows individual fields to define arbitrary validation - // logic. It is yielded the provided config value as an interface{} that is - // guaranteed to be of the proper Schema type, and it can yield warnings or - // errors based on inspection of that value. - // - // ValidateFunc is honored only when the schema's Type is set to TypeInt, - // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. - ValidateFunc SchemaValidateFunc - - // Sensitive ensures that the attribute's value does not get displayed in - // logs or regular output. It should be used for passwords or other - // secret fields. Future versions of Terraform may encrypt these - // values. - Sensitive bool -} - -// SchemaConfigMode is used to influence how a schema item is mapped into a -// corresponding configuration construct, using the ConfigMode field of -// Schema. -type SchemaConfigMode int - -const ( - SchemaConfigModeAuto SchemaConfigMode = iota - SchemaConfigModeAttr - SchemaConfigModeBlock -) - -// SchemaDiffSuppressFunc is a function which can be used to determine -// whether a detected diff on a schema element is "valid" or not, and -// suppress it from the plan if necessary. -// -// Return true if the diff should be suppressed, false to retain it. -type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool - -// SchemaDefaultFunc is a function called to return a default value for -// a field. -type SchemaDefaultFunc func() (interface{}, error) - -// EnvDefaultFunc is a helper function that returns the value of the -// given environment variable, if one exists, or the default value -// otherwise. 
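Several of the Schema fields documented above (`ValidateFunc`, `ConflictsWith`, `DiffSuppressFunc`, `Sensitive`, `Deprecated`) are easiest to see side by side. A sketch with purely illustrative attribute names:

```go
package example

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
)

func exampleSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"role_arn": {
			Type:     schema.TypeString,
			Optional: true,
			// ValidateFunc may return warnings, errors, or both.
			ValidateFunc: func(v interface{}, k string) ([]string, []error) {
				if !strings.HasPrefix(v.(string), "arn:aws:iam::") {
					return nil, []error{fmt.Errorf("%q must be an IAM role ARN", k)}
				}
				return nil, nil
			},
			// Mutually exclusive with role_name; only checked against the config.
			ConflictsWith: []string{"role_name"},
		},
		"role_name": {
			Type:          schema.TypeString,
			Optional:      true,
			ConflictsWith: []string{"role_arn"},
		},
		"policy": {
			Type:     schema.TypeString,
			Optional: true,
			// Suppress diffs that differ only by surrounding whitespace.
			DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
				return strings.TrimSpace(old) == strings.TrimSpace(new)
			},
		},
		"secret": {
			Type:      schema.TypeString,
			Optional:  true,
			Sensitive: true, // hidden in CLI output
		},
		"legacy_flag": {
			Type:       schema.TypeBool,
			Optional:   true,
			Deprecated: "this flag no longer has any effect", // emitted as a warning during Validate
		},
	}
}
```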
-func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - return v, nil - } - - return dv, nil - } -} - -// MultiEnvDefaultFunc is a helper function that returns the value of the first -// environment variable in the given list that returns a non-empty value. If -// none of the environment variables return a value, the default value is -// returned. -func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { - return func() (interface{}, error) { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v, nil - } - } - return dv, nil - } -} - -// SchemaSetFunc is a function that must return a unique ID for the given -// element. This unique ID is used to store the element in a hash. -type SchemaSetFunc func(interface{}) int - -// SchemaStateFunc is a function used to convert some type to a string -// to be stored in the state. -type SchemaStateFunc func(interface{}) string - -// SchemaValidateFunc is a function used to validate a single field in the -// schema. -type SchemaValidateFunc func(interface{}, string) ([]string, []error) - -func (s *Schema) GoString() string { - return fmt.Sprintf("*%#v", *s) -} - -// Returns a default value for this schema by either reading Default or -// evaluating DefaultFunc. If neither of these are defined, returns nil. -func (s *Schema) DefaultValue() (interface{}, error) { - if s.Default != nil { - return s.Default, nil - } - - if s.DefaultFunc != nil { - defaultValue, err := s.DefaultFunc() - if err != nil { - return nil, fmt.Errorf("error loading default: %s", err) - } - return defaultValue, nil - } - - return nil, nil -} - -// Returns a zero value for the schema. -func (s *Schema) ZeroValue() interface{} { - // If it's a set then we'll do a bit of extra work to provide the - // right hashing function in our empty value. - if s.Type == TypeSet { - setFunc := s.Set - if setFunc == nil { - // Default set function uses the schema to hash the whole value - elem := s.Elem - switch t := elem.(type) { - case *Schema: - setFunc = HashSchema(t) - case *Resource: - setFunc = HashResource(t) - default: - panic("invalid set element type") - } - } - return &Set{F: setFunc} - } else { - return s.Type.Zero() - } -} - -func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { - if d == nil { - return d - } - - if s.Type == TypeBool { - normalizeBoolString := func(s string) string { - switch s { - case "0": - return "false" - case "1": - return "true" - } - return s - } - d.Old = normalizeBoolString(d.Old) - d.New = normalizeBoolString(d.New) - } - - if s.Computed && !d.NewRemoved && d.New == "" { - // Computed attribute without a new value set - d.NewComputed = true - } - - if s.ForceNew { - // ForceNew, mark that this field is requiring new under the - // following conditions, explained below: - // - // * Old != New - There is a change in value. This field - // is therefore causing a new resource. - // - // * NewComputed - This field is being computed, hence a - // potential change in value, mark as causing a new resource. - d.RequiresNew = d.Old != d.New || d.NewComputed - } - - if d.NewRemoved { - return d - } - - if s.Computed { - // FIXME: This is where the customized bool from getChange finally - // comes into play. It allows the previously incorrect behavior - // of an empty string being used as "unset" when the value is - // computed. 
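The `DefaultFunc` helpers documented above are most commonly used on provider-level configuration. A sketch; the attribute and environment-variable names are hypothetical.

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

func providerSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"url": {
			Type:     schema.TypeString,
			Optional: true,
			// Falls back to $EXAMPLE_URL, then to the literal default.
			DefaultFunc: schema.EnvDefaultFunc("EXAMPLE_URL", "https://example.invalid/api"),
		},
		"access_key": {
			Type:     schema.TypeString,
			Optional: true,
			// First non-empty variable wins; a nil default means the user is prompted.
			DefaultFunc: schema.MultiEnvDefaultFunc([]string{"EXAMPLE_ACCESS_KEY", "AWS_ACCESS_KEY_ID"}, nil),
		},
	}
}
```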
This should be removed once we can properly - // represent an unset/nil value from the configuration. - if !customized { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil - } - } - - if d.New == "" && !d.NewComputed { - // Computed attribute without a new value set - d.NewComputed = true - } - } - - if s.Sensitive { - // Set the Sensitive flag so output is hidden in the UI - d.Sensitive = true - } - - return d -} - -// InternalMap is used to aid in the transition to the new schema types and -// protocol. The name is not meant to convey any usefulness, as this is not to -// be used directly by any providers. -type InternalMap = schemaMap - -// schemaMap is a wrapper that adds nice functions on top of schemas. -type schemaMap map[string]*Schema - -func (m schemaMap) panicOnError() bool { - if os.Getenv(PanicOnErr) != "" { - return true - } - return false -} - -// Data returns a ResourceData for the given schema, state, and diff. -// -// The diff is optional. -func (m schemaMap) Data( - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*ResourceData, error) { - return &ResourceData{ - schema: m, - state: s, - diff: d, - panicOnError: m.panicOnError(), - }, nil -} - -// DeepCopy returns a copy of this schemaMap. The copy can be safely modified -// without affecting the original. -func (m *schemaMap) DeepCopy() schemaMap { - copy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - return *copy.(*schemaMap) -} - -// Diff returns the diff for a resource given the schema map, -// state, and configuration. -func (m schemaMap) Diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - customizeDiff CustomizeDiffFunc, - meta interface{}, - handleRequiresNew bool) (*terraform.InstanceDiff, error) { - result := new(terraform.InstanceDiff) - result.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Make sure to mark if the resource is tainted - if s != nil { - result.DestroyTainted = s.Tainted - } - - d := &ResourceData{ - schema: m, - state: s, - config: c, - panicOnError: m.panicOnError(), - } - - for k, schema := range m { - err := m.diff(k, schema, result, d, false) - if err != nil { - return nil, err - } - } - - // Remove any nil diffs just to keep things clean - for k, v := range result.Attributes { - if v == nil { - delete(result.Attributes, k) - } - } - - // If this is a non-destroy diff, call any custom diff logic that has been - // defined. - if !result.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, s, result) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result, rd, false) - if err != nil { - return nil, err - } - } - } - - if handleRequiresNew { - // If the diff requires a new resource, then we recompute the diff - // so we have the complete new resource diff, and preserve the - // RequiresNew fields where necessary so the user knows exactly what - // caused that. - if result.RequiresNew() { - // Create the new diff - result2 := new(terraform.InstanceDiff) - result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Preserve the DestroyTainted flag - result2.DestroyTainted = result.DestroyTainted - - // Reset the data to not contain state. We have to call init() - // again in order to reset the FieldReaders. 
- d.state = nil - d.init() - - // Perform the diff again - for k, schema := range m { - err := m.diff(k, schema, result2, d, false) - if err != nil { - return nil, err - } - } - - // Re-run customization - if !result2.DestroyTainted && customizeDiff != nil { - mc := m.DeepCopy() - rd := newResourceDiff(mc, c, d.state, result2) - if err := customizeDiff(rd, meta); err != nil { - return nil, err - } - for _, k := range rd.UpdatedKeys() { - err := m.diff(k, mc[k], result2, rd, false) - if err != nil { - return nil, err - } - } - } - - // Force all the fields to not force a new since we know what we - // want to force new. - for k, attr := range result2.Attributes { - if attr == nil { - continue - } - - if attr.RequiresNew { - attr.RequiresNew = false - } - - if s != nil { - attr.Old = s.Attributes[k] - } - } - - // Now copy in all the requires new diffs... - for k, attr := range result.Attributes { - if attr == nil { - continue - } - - newAttr, ok := result2.Attributes[k] - if !ok { - newAttr = attr - } - - if attr.RequiresNew { - newAttr.RequiresNew = true - } - - result2.Attributes[k] = newAttr - } - - // And set the diff! - result = result2 - } - - } - - // Go through and detect all of the ComputedWhens now that we've - // finished the diff. - // TODO - - if result.Empty() { - // If we don't have any diff elements, just return nil - return nil, nil - } - - return result, nil -} - -// Input implements the terraform.ResourceProvider method by asking -// for input for required configuration keys that don't have a value. -func (m schemaMap) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - keys := make([]string, 0, len(m)) - for k, _ := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := m[k] - - // Skip things that don't require config, if that is even valid - // for a provider schema. - // Required XOR Optional must always be true to validate, so we only - // need to check one. - if v.Optional { - continue - } - - // Deprecated fields should never prompt - if v.Deprecated != "" { - continue - } - - // Skip things that have a value of some sort already - if _, ok := c.Raw[k]; ok { - continue - } - - // Skip if it has a default value - defaultValue, err := v.DefaultValue() - if err != nil { - return nil, fmt.Errorf("%s: error loading default: %s", k, err) - } - if defaultValue != nil { - continue - } - - var value interface{} - switch v.Type { - case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: - continue - case TypeString: - value, err = m.inputString(input, k, v) - default: - panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) - } - - if err != nil { - return nil, fmt.Errorf( - "%s: %s", k, err) - } - - c.Config[k] = value - } - - return c, nil -} - -// Validate validates the configuration against this schema mapping. -func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return m.validateObject("", m, c) -} - -// InternalValidate validates the format of this schema. This should be called -// from a unit test (and not in user-path code) to verify that a schema -// is properly built. 
-func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { - return m.internalValidate(topSchemaMap, false) -} - -func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { - if topSchemaMap == nil { - topSchemaMap = m - } - for k, v := range m { - if v.Type == TypeInvalid { - return fmt.Errorf("%s: Type must be specified", k) - } - - if v.Optional && v.Required { - return fmt.Errorf("%s: Optional or Required must be set, not both", k) - } - - if v.Required && v.Computed { - return fmt.Errorf("%s: Cannot be both Required and Computed", k) - } - - if !v.Required && !v.Optional && !v.Computed { - return fmt.Errorf("%s: One of optional, required, or computed must be set", k) - } - - computedOnly := v.Computed && !v.Optional - - switch v.ConfigMode { - case SchemaConfigModeBlock: - if _, ok := v.Elem.(*Resource); !ok { - return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) - } - if attrsOnly { - return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) - } - if computedOnly { - return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) - } - case SchemaConfigModeAttr: - // anything goes - case SchemaConfigModeAuto: - // Since "Auto" for Elem: *Resource would create a nested block, - // and that's impossible inside an attribute, we require it to be - // explicitly overridden as mode "Attr" for clarity. - if _, ok := v.Elem.(*Resource); ok { - if attrsOnly { - return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) - } - } - default: - return fmt.Errorf("%s: invalid ConfigMode value", k) - } - - if v.Computed && v.Default != nil { - return fmt.Errorf("%s: Default must be nil if computed", k) - } - - if v.Required && v.Default != nil { - return fmt.Errorf("%s: Default cannot be set with Required", k) - } - - if len(v.ComputedWhen) > 0 && !v.Computed { - return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) - } - - if len(v.ConflictsWith) > 0 && v.Required { - return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) - } - - if len(v.ConflictsWith) > 0 { - for _, key := range v.ConflictsWith { - parts := strings.Split(key, ".") - sm := topSchemaMap - var target *Schema - for _, part := range parts { - // Skip index fields - if _, err := strconv.Atoi(part); err == nil { - continue - } - - var ok bool - if target, ok = sm[part]; !ok { - return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s) at part (%s)", k, key, part) - } - - if subResource, ok := target.Elem.(*Resource); ok { - sm = schemaMap(subResource.Schema) - } - } - if target == nil { - return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) - } - if target.Required { - return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) - } - - if len(target.ComputedWhen) > 0 { - return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) - } - } - } - - if v.Type == TypeList || v.Type == TypeSet { - if v.Elem == nil { - return fmt.Errorf("%s: Elem must be set for lists", k) - } - - if v.Default != nil { - return fmt.Errorf("%s: Default is not valid for lists or sets", k) - } - - if v.Type != TypeSet && v.Set != nil { - return fmt.Errorf("%s: Set can only be set for TypeSet", k) - } - - switch t := v.Elem.(type) { - case *Resource: - attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr - 
- if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { - return err - } - case *Schema: - bad := t.Computed || t.Optional || t.Required - if bad { - return fmt.Errorf( - "%s: Elem must have only Type set", k) - } - } - } else { - if v.MaxItems > 0 || v.MinItems > 0 { - return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) - } - } - - // Computed-only field - if v.Computed && !v.Optional { - if v.ValidateFunc != nil { - return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ - "there's nothing to validate on computed-only field", k) - } - if v.DiffSuppressFunc != nil { - return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ - " between config and state representation. "+ - "There is no config for computed-only field, nothing to compare.", k) - } - } - - if v.ValidateFunc != nil { - switch v.Type { - case TypeList, TypeSet: - return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) - } - } - - if v.Deprecated == "" && v.Removed == "" { - if !isValidFieldName(k) { - return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) - } - } - } - - return nil -} - -func isValidFieldName(name string) bool { - re := regexp.MustCompile("^[a-z0-9_]+$") - return re.MatchString(name) -} - -// resourceDiffer is an interface that is used by the private diff functions. -// This helps facilitate diff logic for both ResourceData and ResoureDiff with -// minimal divergence in code. -type resourceDiffer interface { - diffChange(string) (interface{}, interface{}, bool, bool, bool) - Get(string) interface{} - GetChange(string) (interface{}, interface{}) - GetOk(string) (interface{}, bool) - HasChange(string) bool - Id() string -} - -func (m schemaMap) diff( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - - unsupressedDiff := new(terraform.InstanceDiff) - unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - var err error - switch schema.Type { - case TypeBool, TypeInt, TypeFloat, TypeString: - err = m.diffString(k, schema, unsupressedDiff, d, all) - case TypeList: - err = m.diffList(k, schema, unsupressedDiff, d, all) - case TypeMap: - err = m.diffMap(k, schema, unsupressedDiff, d, all) - case TypeSet: - err = m.diffSet(k, schema, unsupressedDiff, d, all) - default: - err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) - } - - for attrK, attrV := range unsupressedDiff.Attributes { - switch rd := d.(type) { - case *ResourceData: - if schema.DiffSuppressFunc != nil && attrV != nil && - schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { - // If this attr diff is suppressed, we may still need it in the - // overall diff if it's contained within a set. Rather than - // dropping the diff, make it a NOOP. - if !all { - continue - } - - attrV = &terraform.ResourceAttrDiff{ - Old: attrV.Old, - New: attrV.Old, - } - } - } - diff.Attributes[attrK] = attrV - } - - return err -} - -func (m schemaMap) diffList( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - o, n, _, computedList, customized := d.diffChange(k) - if computedList { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. 
- if o != nil && n == nil && !computedList && schema.Computed { - return nil - } - - if o == nil { - o = []interface{}{} - } - if n == nil { - n = []interface{}{} - } - if s, ok := o.(*Set); ok { - o = s.List() - } - if s, ok := n.(*Set); ok { - n = s.List() - } - os := o.([]interface{}) - vs := n.([]interface{}) - - // If the new value was set, and the two are equal, then we're done. - // We have to do this check here because sets might be NOT - // reflect.DeepEqual so we need to wait until we get the []interface{} - if !all && nSet && reflect.DeepEqual(os, vs) { - return nil - } - - // Get the counts - oldLen := len(os) - newLen := len(vs) - oldStr := strconv.FormatInt(int64(oldLen), 10) - - // If the whole list is computed, then say that the # is computed - if computedList { - diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ - Old: oldStr, - NewComputed: true, - RequiresNew: schema.ForceNew, - } - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - computed := oldLen == 0 && newLen == 0 && schema.Computed - if changed || computed || all { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - newStr := "" - if !computed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // Figure out the maximum - maxLen := oldLen - if newLen > maxLen { - maxLen = newLen - } - - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for i := 0; i < maxLen; i++ { - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%d.%s", k, i, k2) - err := m.diff(subK, schema, diff, d, all) - if err != nil { - return err - } - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeList). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - for i := 0; i < maxLen; i++ { - subK := fmt.Sprintf("%s.%d", k, i) - err := m.diff(subK, &t2, diff, d, all) - if err != nil { - return err - } - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - - return nil -} - -func (m schemaMap) diffMap( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - prefix := k + "." - - // First get all the values from the state - var stateMap, configMap map[string]string - o, n, _, nComputed, customized := d.diffChange(k) - if err := mapstructure.WeakDecode(o, &stateMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(n, &configMap); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - // Keep track of whether the state _exists_ at all prior to clearing it - stateExists := o != nil - - // Delete any count values, since we don't use those - delete(configMap, "%") - delete(stateMap, "%") - - // Check if the number of elements has changed. - oldLen, newLen := len(stateMap), len(configMap) - changed := oldLen != newLen - if oldLen != 0 && newLen == 0 && schema.Computed { - changed = false - } - - // It is computed if we have no old value, no new value, the schema - // says it is computed, and it didn't exist in the state before. The - // last point means: if it existed in the state, even empty, then it - // has already been computed. 
- computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists - - // If the count has changed or we're computed, then add a diff for the - // count. "nComputed" means that the new value _contains_ a value that - // is computed. We don't do granular diffs for this yet, so we mark the - // whole map as computed. - if changed || computed || nComputed { - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed || nComputed, - ForceNew: schema.ForceNew, - } - - oldStr := strconv.FormatInt(int64(oldLen), 10) - newStr := "" - if !computed && !nComputed { - newStr = strconv.FormatInt(int64(newLen), 10) - } else { - oldStr = "" - } - - diff.Attributes[k+".%"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // If the new map is nil and we're computed, then ignore it. - if n == nil && schema.Computed { - return nil - } - - // Now we compare, preferring values from the config map - for k, v := range configMap { - old, ok := stateMap[k] - delete(stateMap, k) - - if old == v && ok && !all { - continue - } - - diff.Attributes[prefix+k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: old, - New: v, - }, - customized, - ) - } - for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }, - customized, - ) - } - - return nil -} - -func (m schemaMap) diffSet( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - - o, n, _, computedSet, customized := d.diffChange(k) - if computedSet { - n = nil - } - nSet := n != nil - - // If we have an old value and no new value is set or will be - // computed once all variables can be interpolated and we're - // computed, then nothing has changed. - if o != nil && n == nil && !computedSet && schema.Computed { - return nil - } - - if o == nil { - o = schema.ZeroValue().(*Set) - } - if n == nil { - n = schema.ZeroValue().(*Set) - } - os := o.(*Set) - ns := n.(*Set) - - // If the new value was set, compare the listCode's to determine if - // the two are equal. Comparing listCode's instead of the actual values - // is needed because there could be computed values in the set which - // would result in false positives while comparing. 
- if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { - return nil - } - - // Get the counts - oldLen := os.Len() - newLen := ns.Len() - oldStr := strconv.Itoa(oldLen) - newStr := strconv.Itoa(newLen) - - // Build a schema for our count - countSchema := &Schema{ - Type: TypeInt, - Computed: schema.Computed, - ForceNew: schema.ForceNew, - } - - // If the set computed then say that the # is computed - if computedSet || schema.Computed && !nSet { - // If # already exists, equals 0 and no new set is supplied, there - // is nothing to record in the diff - count, ok := d.GetOk(k + ".#") - if ok && count.(int) == 0 && !nSet && !computedSet { - return nil - } - - // Set the count but make sure that if # does not exist, we don't - // use the zeroed value - countStr := strconv.Itoa(count.(int)) - if !ok { - countStr = "" - } - - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }, - customized, - ) - return nil - } - - // If the counts are not the same, then record that diff - changed := oldLen != newLen - if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }, - customized, - ) - } - - // Build the list of codes that will make up our set. This is the - // removed codes as well as all the codes in the new codes. - codes := make([][]string, 2) - codes[0] = os.Difference(ns).listCode() - codes[1] = ns.listCode() - for _, list := range codes { - for _, code := range list { - switch t := schema.Elem.(type) { - case *Resource: - // This is a complex resource - for k2, schema := range t.Schema { - subK := fmt.Sprintf("%s.%s.%s", k, code, k2) - err := m.diff(subK, schema, diff, d, true) - if err != nil { - return err - } - } - case *Schema: - // Copy the schema so that we can set Computed/ForceNew from - // the parent schema (the TypeSet). - t2 := *t - t2.ForceNew = schema.ForceNew - - // This is just a primitive element, so go through each and - // just diff each. - subK := fmt.Sprintf("%s.%s", k, code) - err := m.diff(subK, &t2, diff, d, true) - if err != nil { - return err - } - default: - return fmt.Errorf("%s: unknown element type (internal)", k) - } - } - } - - return nil -} - -func (m schemaMap) diffString( - k string, - schema *Schema, - diff *terraform.InstanceDiff, - d resourceDiffer, - all bool) error { - var originalN interface{} - var os, ns string - o, n, _, computed, customized := d.diffChange(k) - if schema.StateFunc != nil && n != nil { - originalN = n - n = schema.StateFunc(n) - } - nraw := n - if nraw == nil && o != nil { - nraw = schema.Type.Zero() - } - if err := mapstructure.WeakDecode(o, &os); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - if err := mapstructure.WeakDecode(nraw, &ns); err != nil { - return fmt.Errorf("%s: %s", k, err) - } - - if os == ns && !all && !computed { - // They're the same value. If there old value is not blank or we - // have an ID, then return right away since we're already setup. 
- if os != "" || d.Id() != "" { - return nil - } - - // Otherwise, only continue if we're computed - if !schema.Computed { - return nil - } - } - - removed := false - if o != nil && n == nil && !computed { - removed = true - } - if removed && schema.Computed { - return nil - } - - diff.Attributes[k] = schema.finalizeDiff( - &terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }, - customized, - ) - - return nil -} - -func (m schemaMap) inputString( - input terraform.UIInput, - k string, - schema *Schema) (interface{}, error) { - result, err := input.Input(context.Background(), &terraform.InputOpts{ - Id: k, - Query: k, - Description: schema.Description, - Default: schema.InputDefault, - }) - - return result, err -} - -func (m schemaMap) validate( - k string, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, ok := c.Get(k) - if !ok && schema.DefaultFunc != nil { - // We have a dynamic default. Check if we have a value. - var err error - raw, err = schema.DefaultFunc() - if err != nil { - return nil, []error{fmt.Errorf( - "%q, error loading default: %s", k, err)} - } - - // We're okay as long as we had a value set - ok = raw != nil - } - if !ok { - if schema.Required { - return nil, []error{fmt.Errorf( - "%q: required field is not set", k)} - } - - return nil, nil - } - - if !schema.Required && !schema.Optional { - // This is a computed-only field - return nil, []error{fmt.Errorf( - "%q: this field cannot be set", k)} - } - - // If the value is unknown then we can't validate it yet. - // In particular, this avoids spurious type errors where downstream - // validation code sees UnknownVariableValue as being just a string. - // The SDK has to allow the unknown value through initially, so that - // Required fields set via an interpolated value are accepted. - if !isWhollyKnown(raw) { - if schema.Deprecated != "" { - return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil - } - return nil, nil - } - - err := m.validateConflictingAttributes(k, schema, c) - if err != nil { - return nil, []error{err} - } - - return m.validateType(k, raw, schema, c) -} - -// isWhollyKnown returns false if the argument contains an UnknownVariableValue -func isWhollyKnown(raw interface{}) bool { - switch raw := raw.(type) { - case string: - if raw == hcl2shim.UnknownVariableValue { - return false - } - case []interface{}: - for _, v := range raw { - if !isWhollyKnown(v) { - return false - } - } - case map[string]interface{}: - for _, v := range raw { - if !isWhollyKnown(v) { - return false - } - } - } - return true -} -func (m schemaMap) validateConflictingAttributes( - k string, - schema *Schema, - c *terraform.ResourceConfig) error { - - if len(schema.ConflictsWith) == 0 { - return nil - } - - for _, conflictingKey := range schema.ConflictsWith { - if raw, ok := c.Get(conflictingKey); ok { - if raw == hcl2shim.UnknownVariableValue { - // An unknown value might become unset (null) once known, so - // we must defer validation until it's known. 
- continue - } - return fmt.Errorf( - "%q: conflicts with %s", k, conflictingKey) - } - } - - return nil -} - -func (m schemaMap) validateList( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // first check if the list is wholly unknown - if s, ok := raw.(string); ok { - if s == hcl2shim.UnknownVariableValue { - return nil, nil - } - } - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - - // We use reflection to verify the slice because you can't - // case to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - - // If we support promotion and the raw value isn't a slice, wrap - // it in []interface{} and check again. - if schema.PromoteSingle && rawV.Kind() != reflect.Slice { - raw = []interface{}{raw} - rawV = reflect.ValueOf(raw) - } - - if rawV.Kind() != reflect.Slice { - return nil, []error{fmt.Errorf( - "%s: should be a list", k)} - } - - // We can't validate list length if this came from a dynamic block. - // Since there's no way to determine if something was from a dynamic block - // at this point, we're going to skip validation in the new protocol if - // there are any unknowns. Validate will eventually be called again once - // all values are known. - if isProto5() && !isWhollyKnown(raw) { - return nil, nil - } - - // Validate length - if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} - } - - if schema.MinItems > 0 && rawV.Len() < schema.MinItems { - return nil, []error{fmt.Errorf( - "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} - } - - // Now build the []interface{} - raws := make([]interface{}, rawV.Len()) - for i, _ := range raws { - raws[i] = rawV.Index(i).Interface() - } - - var ws []string - var es []error - for i, raw := range raws { - key := fmt.Sprintf("%s.%d", k, i) - - // Reify the key value from the ResourceConfig. - // If the list was computed we have all raw values, but some of these - // may be known in the config, and aren't individually marked as Computed. - if r, ok := c.Get(key); ok { - raw = r - } - - var ws2 []string - var es2 []error - switch t := schema.Elem.(type) { - case *Resource: - // This is a sub-resource - ws2, es2 = m.validateObject(key, t.Schema, c) - case *Schema: - ws2, es2 = m.validateType(key, raw, t, c) - } - - if len(ws2) > 0 { - ws = append(ws, ws2...) - } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - return ws, es -} - -func (m schemaMap) validateMap( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - // first check if the list is wholly unknown - if s, ok := raw.(string); ok { - if s == hcl2shim.UnknownVariableValue { - return nil, nil - } - } - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - // We use reflection to verify the slice because you can't - // case to []interface{} unless the slice is exactly that type. - rawV := reflect.ValueOf(raw) - switch rawV.Kind() { - case reflect.String: - // If raw and reified are equal, this is a string and should - // be rejected. - reified, reifiedOk := c.Get(k) - if reifiedOk && raw == reified && !c.IsComputed(k) { - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - // Otherwise it's likely raw is an interpolation. 
- return nil, nil - case reflect.Map: - case reflect.Slice: - default: - return nil, []error{fmt.Errorf("%s: should be a map", k)} - } - - // If it is not a slice, validate directly - if rawV.Kind() != reflect.Slice { - mapIface := rawV.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - if schema.ValidateFunc != nil { - return schema.ValidateFunc(mapIface, k) - } - return nil, nil - } - - // It is a slice, verify that all the elements are maps - raws := make([]interface{}, rawV.Len()) - for i, _ := range raws { - raws[i] = rawV.Index(i).Interface() - } - - for _, raw := range raws { - v := reflect.ValueOf(raw) - if v.Kind() != reflect.Map { - return nil, []error{fmt.Errorf( - "%s: should be a map", k)} - } - mapIface := v.Interface() - if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { - return nil, errs - } - } - - if schema.ValidateFunc != nil { - validatableMap := make(map[string]interface{}) - for _, raw := range raws { - for k, v := range raw.(map[string]interface{}) { - validatableMap[k] = v - } - } - - return schema.ValidateFunc(validatableMap, k) - } - - return nil, nil -} - -func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { - for key, raw := range m { - valueType, err := getValueType(k, schema) - if err != nil { - return nil, []error{err} - } - - switch valueType { - case TypeBool: - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeInt: - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeFloat: - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - case TypeString: - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} - } - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - } - return nil, nil -} - -func getValueType(k string, schema *Schema) (ValueType, error) { - if schema.Elem == nil { - return TypeString, nil - } - if vt, ok := schema.Elem.(ValueType); ok { - return vt, nil - } - - // If a Schema is provided to a Map, we use the Type of that schema - // as the type for each element in the Map. - if s, ok := schema.Elem.(*Schema); ok { - return s.Type, nil - } - - if _, ok := schema.Elem.(*Resource); ok { - // TODO: We don't actually support this (yet) - // but silently pass the validation, until we decide - // how to handle nested structures in maps - return TypeString, nil - } - return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) -} - -func (m schemaMap) validateObject( - k string, - schema map[string]*Schema, - c *terraform.ResourceConfig) ([]string, []error) { - raw, _ := c.Get(k) - - // schemaMap can't validate nil - if raw == nil { - return nil, nil - } - - if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { - return nil, []error{fmt.Errorf( - "%s: expected object, got %s", - k, reflect.ValueOf(raw).Kind())} - } - - var ws []string - var es []error - for subK, s := range schema { - key := subK - if k != "" { - key = fmt.Sprintf("%s.%s", k, subK) - } - - ws2, es2 := m.validate(key, s, c) - if len(ws2) > 0 { - ws = append(ws, ws2...) 
- } - if len(es2) > 0 { - es = append(es, es2...) - } - } - - // Detect any extra/unknown keys and report those as errors. - if m, ok := raw.(map[string]interface{}); ok { - for subk, _ := range m { - if _, ok := schema[subk]; !ok { - if subk == TimeoutsConfigKey { - continue - } - es = append(es, fmt.Errorf( - "%s: invalid or unknown key: %s", k, subk)) - } - } - } - - return ws, es -} - -func (m schemaMap) validatePrimitive( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - - // a nil value shouldn't happen in the old protocol, and in the new - // protocol the types have already been validated. Either way, we can't - // reflect on nil, so don't panic. - if raw == nil { - return nil, nil - } - - // Catch if the user gave a complex type where a primitive was - // expected, so we can return a friendly error message that - // doesn't contain Go type system terminology. - switch reflect.ValueOf(raw).Type().Kind() { - case reflect.Slice: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a list", k), - } - case reflect.Map: - return nil, []error{ - fmt.Errorf("%s must be a single value, not a map", k), - } - default: // ok - } - - if c.IsComputed(k) { - // If the key is being computed, then it is not an error as - // long as it's not a slice or map. - return nil, nil - } - - var decoded interface{} - switch schema.Type { - case TypeBool: - // Verify that we can parse this as the correct type - var n bool - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeInt: - switch { - case isProto5(): - // We need to verify the type precisely, because WeakDecode will - // decode a float as an integer. - - // the config shims only use int for integral number values - if v, ok := raw.(int); ok { - decoded = v - } else { - return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} - } - default: - // Verify that we can parse this as an int - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - } - case TypeFloat: - // Verify that we can parse this as an int - var n float64 - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - case TypeString: - // Verify that we can parse this as a string - var n string - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} - } - decoded = n - default: - panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) - } - - if schema.ValidateFunc != nil { - return schema.ValidateFunc(decoded, k) - } - - return nil, nil -} - -func (m schemaMap) validateType( - k string, - raw interface{}, - schema *Schema, - c *terraform.ResourceConfig) ([]string, []error) { - var ws []string - var es []error - switch schema.Type { - case TypeSet, TypeList: - ws, es = m.validateList(k, raw, schema, c) - case TypeMap: - ws, es = m.validateMap(k, raw, schema, c) - default: - ws, es = m.validatePrimitive(k, raw, schema, c) - } - - if schema.Deprecated != "" { - ws = append(ws, fmt.Sprintf( - "%q: [DEPRECATED] %s", k, schema.Deprecated)) - } - - if schema.Removed != "" { - es = append(es, fmt.Errorf( - "%q: [REMOVED] %s", k, schema.Removed)) - } - - return ws, es -} - -// Zero returns the zero value for a type. 
-func (t ValueType) Zero() interface{} { - switch t { - case TypeInvalid: - return nil - case TypeBool: - return false - case TypeInt: - return 0 - case TypeFloat: - return 0.0 - case TypeString: - return "" - case TypeList: - return []interface{}{} - case TypeMap: - return map[string]interface{}{} - case TypeSet: - return new(Set) - case typeObject: - return map[string]interface{}{} - default: - panic(fmt.Sprintf("unknown type %s", t)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go deleted file mode 100644 index 8ee89e47..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go +++ /dev/null @@ -1,250 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" - "sync" - - "github.com/hashicorp/terraform/helper/hashcode" -) - -// HashString hashes strings. If you want a Set of strings, this is the -// SchemaSetFunc you want. -func HashString(v interface{}) int { - return hashcode.String(v.(string)) -} - -// HashInt hashes integers. If you want a Set of integers, this is the -// SchemaSetFunc you want. -func HashInt(v interface{}) int { - return hashcode.String(strconv.Itoa(v.(int))) -} - -// HashResource hashes complex structures that are described using -// a *Resource. This is the default set implementation used when a set's -// element type is a full resource. -func HashResource(resource *Resource) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeResourceForHash(&buf, v, resource) - return hashcode.String(buf.String()) - } -} - -// HashSchema hashes values that are described using a *Schema. This is the -// default set implementation used when a set's element type is a single -// schema. -func HashSchema(schema *Schema) SchemaSetFunc { - return func(v interface{}) int { - var buf bytes.Buffer - SerializeValueForHash(&buf, v, schema) - return hashcode.String(buf.String()) - } -} - -// Set is a set data structure that is returned for elements of type -// TypeSet. -type Set struct { - F SchemaSetFunc - - m map[string]interface{} - once sync.Once -} - -// NewSet is a convenience method for creating a new set with the given -// items. -func NewSet(f SchemaSetFunc, items []interface{}) *Set { - s := &Set{F: f} - for _, i := range items { - s.Add(i) - } - - return s -} - -// CopySet returns a copy of another set. -func CopySet(otherSet *Set) *Set { - return NewSet(otherSet.F, otherSet.List()) -} - -// Add adds an item to the set if it isn't already in the set. -func (s *Set) Add(item interface{}) { - s.add(item, false) -} - -// Remove removes an item if it's already in the set. Idempotent. -func (s *Set) Remove(item interface{}) { - s.remove(item) -} - -// Contains checks if the set has the given item. -func (s *Set) Contains(item interface{}) bool { - _, ok := s.m[s.hash(item)] - return ok -} - -// Len returns the amount of items in the set. -func (s *Set) Len() int { - return len(s.m) -} - -// List returns the elements of this set in slice format. -// -// The order of the returned elements is deterministic. Given the same -// set, the order of this will always be the same. -func (s *Set) List() []interface{} { - result := make([]interface{}, len(s.m)) - for i, k := range s.listCode() { - result[i] = s.m[k] - } - - return result -} - -// Difference performs a set difference of the two sets, returning -// a new third set that has only the elements unique to this set. 
-func (s *Set) Difference(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; !ok { - result.m[k] = v - } - } - - return result -} - -// Intersection performs the set intersection of the two sets -// and returns a new third set. -func (s *Set) Intersection(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - if _, ok := other.m[k]; ok { - result.m[k] = v - } - } - - return result -} - -// Union performs the set union of the two sets and returns a new third -// set. -func (s *Set) Union(other *Set) *Set { - result := &Set{F: s.F} - result.once.Do(result.init) - - for k, v := range s.m { - result.m[k] = v - } - for k, v := range other.m { - result.m[k] = v - } - - return result -} - -func (s *Set) Equal(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - return reflect.DeepEqual(s.m, other.m) -} - -// HashEqual simply checks to the keys the top-level map to the keys in the -// other set's top-level map to see if they are equal. This obviously assumes -// you have a properly working hash function - use HashResource if in doubt. -func (s *Set) HashEqual(raw interface{}) bool { - other, ok := raw.(*Set) - if !ok { - return false - } - - ks1 := make([]string, 0) - ks2 := make([]string, 0) - - for k := range s.m { - ks1 = append(ks1, k) - } - for k := range other.m { - ks2 = append(ks2, k) - } - - sort.Strings(ks1) - sort.Strings(ks2) - - return reflect.DeepEqual(ks1, ks2) -} - -func (s *Set) GoString() string { - return fmt.Sprintf("*Set(%#v)", s.m) -} - -func (s *Set) init() { - s.m = make(map[string]interface{}) -} - -func (s *Set) add(item interface{}, computed bool) string { - s.once.Do(s.init) - - code := s.hash(item) - if computed { - code = "~" + code - - if isProto5() { - tmpCode := code - count := 0 - for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { - count++ - tmpCode = fmt.Sprintf("%s%d", code, count) - } - code = tmpCode - } - } - - if _, ok := s.m[code]; !ok { - s.m[code] = item - } - - return code -} - -func (s *Set) hash(item interface{}) string { - code := s.F(item) - // Always return a nonnegative hashcode. - if code < 0 { - code = -code - } - return strconv.Itoa(code) -} - -func (s *Set) remove(item interface{}) string { - s.once.Do(s.init) - - code := s.hash(item) - delete(s.m, code) - - return code -} - -func (s *Set) index(item interface{}) int { - return sort.SearchStrings(s.listCode(), s.hash(item)) -} - -func (s *Set) listCode() []string { - // Sort the hash codes so the order of the list is deterministic - keys := make([]string, 0, len(s.m)) - for k := range s.m { - keys = append(keys, k) - } - sort.Sort(sort.StringSlice(keys)) - return keys -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go deleted file mode 100644 index d2dbff53..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go +++ /dev/null @@ -1,115 +0,0 @@ -package schema - -import ( - "encoding/json" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" -) - -// DiffFromValues takes the current state and desired state as cty.Values and -// derives a terraform.InstanceDiff to give to the legacy providers. 
This is -// used to take the states provided by the new ApplyResourceChange method and -// convert them to a state+diff required for the legacy Apply method. -func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { - return diffFromValues(prior, planned, res, nil) -} - -// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our -// test fixtures from the legacy tests. In the new provider protocol the diff -// only needs to be created for the apply operation, and any customizations -// have already been done. -func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { - instanceState, err := res.ShimInstanceStateFromValue(prior) - if err != nil { - return nil, err - } - - configSchema := res.CoreConfigSchema() - - cfg := terraform.NewResourceConfigShimmed(planned, configSchema) - removeConfigUnknowns(cfg.Config) - removeConfigUnknowns(cfg.Raw) - - diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false) - if err != nil { - return nil, err - } - - return diff, err -} - -// During apply the only unknown values are those which are to be computed by -// the resource itself. These may have been marked as unknown config values, and -// need to be removed to prevent the UnknownVariableValue from appearing the diff. -func removeConfigUnknowns(cfg map[string]interface{}) { - for k, v := range cfg { - switch v := v.(type) { - case string: - if v == hcl2shim.UnknownVariableValue { - delete(cfg, k) - } - case []interface{}: - for _, i := range v { - if m, ok := i.(map[string]interface{}); ok { - removeConfigUnknowns(m) - } - } - case map[string]interface{}: - removeConfigUnknowns(v) - } - } -} - -// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to -// get a new cty.Value state. This is used to convert the diff returned from -// the legacy provider Diff method to the state required for the new -// PlanResourceChange method. -func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) { - return d.ApplyToValue(base, schema) -} - -// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON -// encoding. -func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { - js, err := ctyjson.Marshal(val, ty) - if err != nil { - return nil, err - } - - var m map[string]interface{} - if err := json.Unmarshal(js, &m); err != nil { - return nil, err - } - - return m, nil -} - -// JSONMapToStateValue takes a generic json map[string]interface{} and converts it -// to the specific type, ensuring that the values conform to the schema. -func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { - var val cty.Value - - js, err := json.Marshal(m) - if err != nil { - return val, err - } - - val, err = ctyjson.Unmarshal(js, block.ImpliedType()) - if err != nil { - return val, err - } - - return block.CoerceValue(val) -} - -// StateValueFromInstanceState converts a terraform.InstanceState to a -// cty.Value as described by the provided cty.Type, and maintains the resource -// ID as the "id" attribute. 
-func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { - return is.AttrsAsObjectValue(ty) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go deleted file mode 100644 index 12278217..00000000 --- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go +++ /dev/null @@ -1,28 +0,0 @@ -package schema - -import ( - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -// TestResourceDataRaw creates a ResourceData from a raw configuration map. -func TestResourceDataRaw( - t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { - t.Helper() - - c := terraform.NewResourceConfigRaw(raw) - - sm := schemaMap(schema) - diff, err := sm.Diff(nil, c, nil, nil, true) - if err != nil { - t.Fatalf("err: %s", err) - } - - result, err := sm.Data(nil, diff) - if err != nil { - t.Fatalf("err: %s", err) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/client.go b/vendor/github.com/hashicorp/terraform/httpclient/client.go deleted file mode 100644 index bb06beb4..00000000 --- a/vendor/github.com/hashicorp/terraform/httpclient/client.go +++ /dev/null @@ -1,18 +0,0 @@ -package httpclient - -import ( - "net/http" - - cleanhttp "github.com/hashicorp/go-cleanhttp" -) - -// New returns the DefaultPooledClient from the cleanhttp -// package that will also send a Terraform User-Agent string. -func New() *http.Client { - cli := cleanhttp.DefaultPooledClient() - cli.Transport = &userAgentRoundTripper{ - userAgent: UserAgentString(), - inner: cli.Transport, - } - return cli -} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go deleted file mode 100644 index 536703c6..00000000 --- a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go +++ /dev/null @@ -1,56 +0,0 @@ -package httpclient - -import ( - "fmt" - "log" - "net/http" - "os" - "strings" - - "github.com/hashicorp/terraform/version" -) - -const userAgentFormat = "Terraform/%s" -const uaEnvVar = "TF_APPEND_USER_AGENT" - -// Deprecated: Use UserAgent(version) instead -func UserAgentString() string { - ua := fmt.Sprintf(userAgentFormat, version.Version) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} - -type userAgentRoundTripper struct { - inner http.RoundTripper - userAgent string -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", rt.userAgent) - } - log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String()) - return rt.inner.RoundTrip(req) -} - -func TerraformUserAgent(version string) string { - ua := fmt.Sprintf("HashiCorp Terraform/%s (+https://www.terraform.io)", version) - - if add := os.Getenv(uaEnvVar); add != "" { - add = strings.TrimSpace(add) - if len(add) > 0 { - ua += " " + add - log.Printf("[DEBUG] Using modified User-Agent: %s", ua) - } - } - - return ua -} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go deleted file mode 100644 index ee1adb0c..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go +++ /dev/null @@ -1,123 +0,0 
@@ -package earlyconfig - -import ( - "fmt" - "sort" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/moduledeps" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/tfdiags" -) - -// A Config is a node in the tree of modules within a configuration. -// -// The module tree is constructed by following ModuleCall instances recursively -// through the root module transitively into descendent modules. -type Config struct { - // RootModule points to the Config for the root module within the same - // module tree as this module. If this module _is_ the root module then - // this is self-referential. - Root *Config - - // ParentModule points to the Config for the module that directly calls - // this module. If this is the root module then this field is nil. - Parent *Config - - // Path is a sequence of module logical names that traverse from the root - // module to this config. Path is empty for the root module. - // - // This should only be used to display paths to the end-user in rare cases - // where we are talking about the static module tree, before module calls - // have been resolved. In most cases, an addrs.ModuleInstance describing - // a node in the dynamic module tree is better, since it will then include - // any keys resulting from evaluating "count" and "for_each" arguments. - Path addrs.Module - - // ChildModules points to the Config for each of the direct child modules - // called from this module. The keys in this map match the keys in - // Module.ModuleCalls. - Children map[string]*Config - - // Module points to the object describing the configuration for the - // various elements (variables, resources, etc) defined by this module. - Module *tfconfig.Module - - // CallPos is the source position for the header of the module block that - // requested this module. - // - // This field is meaningless for the root module, where its contents are undefined. - CallPos tfconfig.SourcePos - - // SourceAddr is the source address that the referenced module was requested - // from, as specified in configuration. - // - // This field is meaningless for the root module, where its contents are undefined. - SourceAddr string - - // Version is the specific version that was selected for this module, - // based on version constraints given in configuration. - // - // This field is nil if the module was loaded from a non-registry source, - // since versions are not supported for other sources. - // - // This field is meaningless for the root module, where it will always - // be nil. - Version *version.Version -} - -// ProviderDependencies returns the provider dependencies for the recieving -// config, including all of its descendent modules. 
-func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var name string - if len(c.Path) > 0 { - name = c.Path[len(c.Path)-1] - } - - ret := &moduledeps.Module{ - Name: name, - } - - providers := make(moduledeps.Providers) - for name, reqs := range c.Module.RequiredProviders { - inst := moduledeps.ProviderInstance(name) - var constraints version.Constraints - for _, reqStr := range reqs.VersionConstraints { - if reqStr != "" { - constraint, err := version.NewConstraint(reqStr) - if err != nil { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid provider version constraint", - Detail: fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, name), - })) - continue - } - constraints = append(constraints, constraint...) - } - } - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.NewConstraints(constraints), - Reason: moduledeps.ProviderDependencyExplicit, - } - } - ret.Providers = providers - - childNames := make([]string, 0, len(c.Children)) - for name := range c.Children { - childNames = append(childNames, name) - } - sort.Strings(childNames) - - for _, name := range childNames { - child, childDiags := c.Children[name].ProviderDependencies() - ret.Children = append(ret.Children, child) - diags = diags.Append(childDiags) - } - - return ret, diags -} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go deleted file mode 100644 index 770d5dfb..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go +++ /dev/null @@ -1,144 +0,0 @@ -package earlyconfig - -import ( - "fmt" - "sort" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" -) - -// BuildConfig constructs a Config from a root module by loading all of its -// descendent modules via the given ModuleWalker. -func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - cfg := &Config{ - Module: root, - } - cfg.Root = cfg // Root module is self-referential. - cfg.Children, diags = buildChildModules(cfg, walker) - return cfg, diags -} - -func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret := map[string]*Config{} - calls := parent.Module.ModuleCalls - - // We'll sort the calls by their local names so that they'll appear in a - // predictable order in any logging that's produced during the walk. 
- callNames := make([]string, 0, len(calls)) - for k := range calls { - callNames = append(callNames, k) - } - sort.Strings(callNames) - - for _, callName := range callNames { - call := calls[callName] - path := make([]string, len(parent.Path)+1) - copy(path, parent.Path) - path[len(path)-1] = call.Name - - var vc version.Constraints - if strings.TrimSpace(call.Version) != "" { - var err error - vc, err = version.NewConstraint(call.Version) - if err != nil { - diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ - Severity: tfconfig.DiagError, - Summary: "Invalid version constraint", - Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), - })) - continue - } - } - - req := ModuleRequest{ - Name: call.Name, - Path: path, - SourceAddr: call.Source, - VersionConstraints: vc, - Parent: parent, - CallPos: call.Pos, - } - - mod, ver, modDiags := walker.LoadModule(&req) - diags = append(diags, modDiags...) - if mod == nil { - // nil can be returned if the source address was invalid and so - // nothing could be loaded whatsoever. LoadModule should've - // returned at least one error diagnostic in that case. - continue - } - - child := &Config{ - Parent: parent, - Root: parent.Root, - Path: path, - Module: mod, - CallPos: call.Pos, - SourceAddr: call.Source, - Version: ver, - } - - child.Children, modDiags = buildChildModules(child, walker) - diags = diags.Append(modDiags) - - ret[call.Name] = child - } - - return ret, diags -} - -// ModuleRequest is used as part of the ModuleWalker interface used with -// function BuildConfig. -type ModuleRequest struct { - // Name is the "logical name" of the module call within configuration. - // This is provided in case the name is used as part of a storage key - // for the module, but implementations must otherwise treat it as an - // opaque string. It is guaranteed to have already been validated as an - // HCL identifier and UTF-8 encoded. - Name string - - // Path is a list of logical names that traverse from the root module to - // this module. This can be used, for example, to form a lookup key for - // each distinct module call in a configuration, allowing for multiple - // calls with the same name at different points in the tree. - Path addrs.Module - - // SourceAddr is the source address string provided by the user in - // configuration. - SourceAddr string - - // VersionConstraint is the version constraint applied to the module in - // configuration. - VersionConstraints version.Constraints - - // Parent is the partially-constructed module tree node that the loaded - // module will be added to. Callers may refer to any field of this - // structure except Children, which is still under construction when - // ModuleRequest objects are created and thus has undefined content. - // The main reason this is provided is so that full module paths can - // be constructed for uniqueness. - Parent *Config - - // CallRange is the source position for the header of the "module" block - // in configuration that prompted this request. - CallPos tfconfig.SourcePos -} - -// ModuleWalker is an interface used with BuildConfig. -type ModuleWalker interface { - LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) -} - -// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps -// a callback function, for more convenient use of that interface. 
-type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) - -func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - return f(req) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go deleted file mode 100644 index 9b2fd7f7..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go +++ /dev/null @@ -1,78 +0,0 @@ -package earlyconfig - -import ( - "fmt" - - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/tfdiags" -) - -func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { - ret := make(tfdiags.Diagnostics, len(diags)) - for i, diag := range diags { - ret[i] = wrapDiagnostic(diag) - } - return ret -} - -func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { - return wrappedDiagnostic{ - d: diag, - } -} - -type wrappedDiagnostic struct { - d tfconfig.Diagnostic -} - -func (d wrappedDiagnostic) Severity() tfdiags.Severity { - switch d.d.Severity { - case tfconfig.DiagError: - return tfdiags.Error - case tfconfig.DiagWarning: - return tfdiags.Warning - default: - // Should never happen since there are no other severities - return 0 - } -} - -func (d wrappedDiagnostic) Description() tfdiags.Description { - // Since the inspect library doesn't produce precise source locations, - // we include the position information as part of the error message text. - // See the comment inside method "Source" for more information. - switch { - case d.d.Pos == nil: - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: d.d.Detail, - } - case d.d.Detail != "": - return tfdiags.Description{ - Summary: d.d.Summary, - Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), - } - default: - return tfdiags.Description{ - Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), - } - } -} - -func (d wrappedDiagnostic) Source() tfdiags.Source { - // Since the inspect library is constrained by the lowest common denominator - // between legacy HCL and modern HCL, it only returns ranges at whole-line - // granularity, and that isn't sufficient to populate a tfdiags.Source - // and so we'll just omit ranges altogether and include the line number in - // the Description text. - // - // Callers that want to return nicer errors should consider reacting to - // earlyconfig errors by attempting a follow-up parse with the normal - // config loader, which can produce more precise source location - // information. - return tfdiags.Source{} -} - -func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go deleted file mode 100644 index a9cf10f3..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package earlyconfig is a specialized alternative to the top-level "configs" -// package that does only shallow processing of configuration and is therefore -// able to be much more liberal than the full config loader in what it accepts. -// -// In particular, it can accept both current and legacy HCL syntax, and it -// ignores top-level blocks that it doesn't recognize. 
These two characteristics -// make this package ideal for dependency-checking use-cases so that we are -// more likely to be able to return an error message about an explicit -// incompatibility than to return a less-actionable message about a construct -// not being supported. -// -// However, its liberal approach also means it should be used sparingly. It -// exists primarily for "terraform init", so that it is able to detect -// incompatibilities more robustly when installing dependencies. For most -// other use-cases, use the "configs" and "configs/configload" packages. -// -// Package earlyconfig is a wrapper around the terraform-config-inspect -// codebase, adding to it just some helper functionality for Terraform's own -// use-cases. -package earlyconfig diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go deleted file mode 100644 index d2d62879..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go +++ /dev/null @@ -1,13 +0,0 @@ -package earlyconfig - -import ( - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/tfdiags" -) - -// LoadModule loads some top-level metadata for the module in the given -// directory. -func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { - mod, diags := tfconfig.LoadModule(dir) - return mod, wrapDiagnostics(diags) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go deleted file mode 100644 index 7096ff74..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go +++ /dev/null @@ -1,125 +0,0 @@ -package initwd - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // we don't want to try and copy the same file over itself. - if eq, err := sameFile(path, dstPath); eq { - return nil - } else if err != nil { - return err - } - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If the current path is a symlink, recreate the symlink relative to - // the dst directory - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - target, err := os.Readlink(path) - if err != nil { - return err - } - - return os.Symlink(target, dstPath) - } - - // If we have a file, copy the contents. 
- srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} - -// sameFile tried to determine if to paths are the same file. -// If the paths don't match, we lookup the inode on supported systems. -func sameFile(a, b string) (bool, error) { - if a == b { - return true, nil - } - - aIno, err := inode(a) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - bIno, err := inode(b) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - if aIno > 0 && aIno == bIno { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go deleted file mode 100644 index b9d938db..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package initwd contains various helper functions used by the "terraform init" -// command to initialize a working directory. -// -// These functions may also be used from testing code to simulate the behaviors -// of "terraform init" against test fixtures, but should not be used elsewhere -// in the main code. -package initwd diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go deleted file mode 100644 index 6b40d08d..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go +++ /dev/null @@ -1,363 +0,0 @@ -package initwd - -import ( - "fmt" - "github.com/hashicorp/terraform/internal/earlyconfig" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/registry" - "github.com/hashicorp/terraform/tfdiags" -) - -const initFromModuleRootCallName = "root" -const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "." - -// DirFromModule populates the given directory (which must exist and be -// empty) with the contents of the module at the given source address. -// -// It does this by installing the given module and all of its descendent -// modules in a temporary root directory and then copying the installed -// files into suitable locations. As a consequence, any diagnostics it -// generates will reveal the location of this temporary directory to the -// user. -// -// This rather roundabout installation approach is taken to ensure that -// installation proceeds in a manner identical to normal module installation. -// -// If the given source address specifies a sub-directory of the given -// package then only the sub-directory and its descendents will be copied -// into the given root directory, which will cause any relative module -// references using ../ from that module to be unresolvable. Error diagnostics -// are produced in that case, to prompt the user to rewrite the source strings -// to be absolute references to the original remote module. 
-func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // The way this function works is pretty ugly, but we accept it because - // -from-module is a less important case than normal module installation - // and so it's better to keep this ugly complexity out here rather than - // adding even more complexity to the normal module installer. - - // The target directory must exist but be empty. - { - entries, err := ioutil.ReadDir(rootDir) - if err != nil { - if os.IsNotExist(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Target directory does not exist", - fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read target directory", - fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), - )) - } - return diags - } - haveEntries := false - for _, entry := range entries { - if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { - continue - } - haveEntries = true - } - if haveEntries { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Can't populate non-empty directory", - fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), - )) - return diags - } - } - - instDir := filepath.Join(rootDir, ".terraform/init-from-module") - inst := NewModuleInstaller(instDir, reg) - log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) - os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too - err := os.MkdirAll(instDir, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create temporary directory", - fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), - )) - return diags - } - - instManifest := make(modsdir.Manifest) - retManifest := make(modsdir.Manifest) - - fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) - fakePos := tfconfig.SourcePos{ - Filename: fakeFilename, - Line: 1, - } - - // -from-module allows relative paths but it's different than a normal - // module address where it'd be resolved relative to the module call - // (which is synthetic, here.) To address this, we'll just patch up any - // relative paths to be absolute paths before we run, ensuring we'll - // get the right result. This also, as an important side-effect, ensures - // that the result will be "downloaded" with go-getter (copied from the - // source location), rather than just recorded as a relative path. - { - maybePath := filepath.ToSlash(sourceAddr) - if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { - if wd, err := os.Getwd(); err == nil { - sourceAddr = filepath.Join(wd, sourceAddr) - log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) - } - } - } - - // Now we need to create an artificial root module that will seed our - // installation process. - fakeRootModule := &tfconfig.Module{ - ModuleCalls: map[string]*tfconfig.ModuleCall{ - initFromModuleRootCallName: { - Name: initFromModuleRootCallName, - Source: sourceAddr, - Pos: fakePos, - }, - }, - } - - // wrapHooks filters hook notifications to only include Download calls - // and to trim off the initFromModuleRootCallName prefix. 
We'll produce - // our own Install notifications directly below. - wrapHooks := installHooksInitDir{ - Wrapped: hooks, - } - getter := reusingGetter{} - _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) - diags = append(diags, instDiags...) - if instDiags.HasErrors() { - return diags - } - - // If all of that succeeded then we'll now migrate what was installed - // into the final directory structure. - err = os.MkdirAll(modulesDir, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create local modules directory", - fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), - )) - return diags - } - - recordKeys := make([]string, 0, len(instManifest)) - for k := range instManifest { - recordKeys = append(recordKeys, k) - } - sort.Strings(recordKeys) - - for _, recordKey := range recordKeys { - record := instManifest[recordKey] - - if record.Key == initFromModuleRootCallName { - // We've found the module the user requested, which we must - // now copy into rootDir so it can be used directly. - log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) - err := copyDir(rootDir, record.Dir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to copy root module", - fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), - )) - continue - } - - // We'll try to load the newly-copied module here just so we can - // sniff for any module calls that ../ out of the root directory - // and must thus be rewritten to be absolute addresses again. - // For now we can't do this rewriting automatically, but we'll - // generate an error to help the user do it manually. - mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway - if mod != nil { - for _, mc := range mod.ModuleCalls { - if pathTraversesUp(mc.Source) { - packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) - newSubdir := filepath.Join(givenSubdir, mc.Source) - if pathTraversesUp(newSubdir) { - // This should never happen in any reasonable - // configuration since this suggests a path that - // traverses up out of the package root. We'll just - // ignore this, since we'll fail soon enough anyway - // trying to resolve this path when this module is - // loaded. - continue - } - - var newAddr = packageAddr - if newSubdir != "" { - newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) - } - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Root module references parent directory", - fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr), - )) - continue - } - } - } - - retManifest[""] = modsdir.Record{ - Key: "", - Dir: rootDir, - } - continue - } - - if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { - // Ignore the *real* root module, whose key is empty, since - // we're only interested in the module named "root" and its - // descendents. 
- continue - } - - newKey := record.Key[len(initFromModuleRootKeyPrefix):] - instPath := filepath.Join(modulesDir, newKey) - tempPath := filepath.Join(instDir, record.Key) - - // tempPath won't be present for a module that was installed from - // a relative path, so in that case we just record the installation - // directory and assume it was already copied into place as part - // of its parent. - if _, err := os.Stat(tempPath); err != nil { - if !os.IsNotExist(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to stat temporary module install directory", - fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err), - )) - continue - } - - var parentKey string - if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 { - parentKey = newKey[:lastDot] - } else { - parentKey = "" // parent is the root module - } - - parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey] - parentNew := retManifest[parentKey] - - // We need to figure out which portion of our directory is the - // parent package path and which portion is the subdirectory - // under that. - baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir) - if err != nil { - // Should never happen, because we constructed both directories - // from the same base and so they must have a common prefix. - panic(err) - } - - newDir := filepath.Join(parentNew.Dir, baseDirRel) - log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir) - newRecord := record // shallow copy - newRecord.Dir = newDir - newRecord.Key = newKey - retManifest[newKey] = newRecord - hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) - continue - } - - err = os.MkdirAll(instPath, os.ModePerm) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to create module install directory", - fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err), - )) - continue - } - - // We copy rather than "rename" here because renaming between directories - // can be tricky in edge-cases like network filesystems, etc. - log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath) - err := copyDir(instPath, tempPath) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to copy descendent module", - fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err), - )) - continue - } - - subDir, err := filepath.Rel(tempPath, record.Dir) - if err != nil { - // Should never happen, because we constructed both directories - // from the same base and so they must have a common prefix. - panic(err) - } - - newRecord := record // shallow copy - newRecord.Dir = filepath.Join(instPath, subDir) - newRecord.Key = newKey - retManifest[newKey] = newRecord - hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) - } - - retManifest.WriteSnapshotToDir(modulesDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write module manifest", - fmt.Sprintf("Error writing module manifest: %s.", err), - )) - } - - if !diags.HasErrors() { - // Try to clean up our temporary directory, but don't worry if we don't - // succeed since it shouldn't hurt anything. 
- os.RemoveAll(instDir) - } - - return diags -} - -func pathTraversesUp(path string) bool { - return strings.HasPrefix(filepath.ToSlash(path), "../") -} - -// installHooksInitDir is an adapter wrapper for an InstallHooks that -// does some fakery to make downloads look like they are happening in their -// final locations, rather than in the temporary loader we use. -// -// It also suppresses "Install" calls entirely, since InitDirFromModule -// does its own installation steps after the initial installation pass -// has completed. -type installHooksInitDir struct { - Wrapped ModuleInstallHooks - ModuleInstallHooksImpl -} - -func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { - if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { - // We won't announce the root module, since hook implementations - // don't expect to see that and the caller will usually have produced - // its own user-facing notification about what it's doing anyway. - return - } - - trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] - h.Wrapped.Download(trimAddr, packageAddr, version) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go deleted file mode 100644 index 85574f7c..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go +++ /dev/null @@ -1,213 +0,0 @@ -package initwd - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - getter "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/registry/regsrc" -) - -// We configure our own go-getter detector and getter sets here, because -// the set of sources we support is part of Terraform's documentation and -// so we don't want any new sources introduced in go-getter to sneak in here -// and work even though they aren't documented. This also insulates us from -// any meddling that might be done by other go-getter callers linked into our -// executable. - -var goGetterDetectors = []getter.Detector{ - new(getter.GitHubDetector), - new(getter.GitDetector), - new(getter.BitBucketDetector), - new(getter.GCSDetector), - new(getter.S3Detector), - new(getter.FileDetector), -} - -var goGetterNoDetectors = []getter.Detector{} - -var goGetterDecompressors = map[string]getter.Decompressor{ - "bz2": new(getter.Bzip2Decompressor), - "gz": new(getter.GzipDecompressor), - "xz": new(getter.XzDecompressor), - "zip": new(getter.ZipDecompressor), - - "tar.bz2": new(getter.TarBzip2Decompressor), - "tar.tbz2": new(getter.TarBzip2Decompressor), - - "tar.gz": new(getter.TarGzipDecompressor), - "tgz": new(getter.TarGzipDecompressor), - - "tar.xz": new(getter.TarXzDecompressor), - "txz": new(getter.TarXzDecompressor), -} - -var goGetterGetters = map[string]getter.Getter{ - "file": new(getter.FileGetter), - "gcs": new(getter.GCSGetter), - "git": new(getter.GitGetter), - "hg": new(getter.HgGetter), - "s3": new(getter.S3Getter), - "http": getterHTTPGetter, - "https": getterHTTPGetter, -} - -var getterHTTPClient = cleanhttp.DefaultClient() - -var getterHTTPGetter = &getter.HttpGetter{ - Client: getterHTTPClient, - Netrc: true, -} - -// A reusingGetter is a helper for the module installer that remembers -// the final resolved addresses of all of the sources it has already been -// asked to install, and will copy from a prior installation directory if -// it has the same resolved source address. 
-// -// The keys in a reusingGetter are resolved and trimmed source addresses -// (with a scheme always present, and without any "subdir" component), -// and the values are the paths where each source was previously installed. -type reusingGetter map[string]string - -// getWithGoGetter retrieves the package referenced in the given address -// into the installation path and then returns the full path to any subdir -// indicated in the address. -// -// The errors returned by this function are those surfaced by the underlying -// go-getter library, which have very inconsistent quality as -// end-user-actionable error messages. At this time we do not have any -// reasonable way to improve these error messages at this layer because -// the underlying errors are not separately recognizable. -func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { - packageAddr, subDir := splitAddrSubdir(addr) - - log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) - - realAddr, err := getter.Detect(packageAddr, instPath, goGetterDetectors) - if err != nil { - return "", err - } - - if isMaybeRelativeLocalPath(realAddr) { - return "", &MaybeRelativePathErr{addr} - } - - var realSubDir string - realAddr, realSubDir = splitAddrSubdir(realAddr) - if realSubDir != "" { - subDir = filepath.Join(realSubDir, subDir) - } - - if realAddr != packageAddr { - log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) - } - - if prevDir, exists := g[realAddr]; exists { - log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) - err := os.Mkdir(instPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) - } - err = copyDir(instPath, prevDir) - if err != nil { - return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) - } - } else { - log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) - client := getter.Client{ - Src: realAddr, - Dst: instPath, - Pwd: instPath, - - Mode: getter.ClientModeDir, - - Detectors: goGetterNoDetectors, // we already did detection above - Decompressors: goGetterDecompressors, - Getters: goGetterGetters, - } - err = client.Get() - if err != nil { - return "", err - } - // Remember where we installed this so we might reuse this directory - // on subsequent calls to avoid re-downloading. - g[realAddr] = instPath - } - - // Our subDir string can contain wildcards until this point, so that - // e.g. a subDir of * can expand to one top-level directory in a .tar.gz - // archive. Now that we've expanded the archive successfully we must - // resolve that into a concrete path. - var finalDir string - if subDir != "" { - finalDir, err = getter.SubdirGlob(instPath, subDir) - log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) - if err != nil { - return "", err - } - } else { - finalDir = instPath - } - - // If we got this far then we have apparently succeeded in downloading - // the requested object! - return filepath.Clean(finalDir), nil -} - -// splitAddrSubdir splits the given address (which is assumed to be a -// registry address or go-getter-style address) into a package portion -// and a sub-directory portion. -// -// The package portion defines what should be downloaded and then the -// sub-directory portion, if present, specifies a sub-directory within -// the downloaded object (an archive, VCS repository, etc) that contains -// the module's configuration files. 
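splitAddrSubdir, whose body continues below, is a thin wrapper over go-getter's SourceDirSubdir, which separates the package address from an optional "//" subdirectory component. A short sketch of that call (the module address is made up for illustration):

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// A source address with a "//" subdirectory component (hypothetical address).
	addr := "github.com/example/terraform-modules//vpc"

	// SourceDirSubdir splits the downloadable package portion from the
	// subdirectory portion, which is what the removed helper delegated to.
	packageAddr, subDir := getter.SourceDirSubdir(addr)

	fmt.Println(packageAddr) // github.com/example/terraform-modules
	fmt.Println(subDir)      // vpc
}
```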
-// -// The subDir portion will be returned as empty if no subdir separator -// ("//") is present in the address. -func splitAddrSubdir(addr string) (packageAddr, subDir string) { - return getter.SourceDirSubdir(addr) -} - -var localSourcePrefixes = []string{ - "./", - "../", - ".\\", - "..\\", -} - -func isLocalSourceAddr(addr string) bool { - for _, prefix := range localSourcePrefixes { - if strings.HasPrefix(addr, prefix) { - return true - } - } - return false -} - -func isRegistrySourceAddr(addr string) bool { - _, err := regsrc.ParseModuleSource(addr) - return err == nil -} - -type MaybeRelativePathErr struct { - Addr string -} - -func (e *MaybeRelativePathErr) Error() string { - return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) -} - -func isMaybeRelativeLocalPath(addr string) bool { - if strings.HasPrefix(addr, "file://") { - _, err := os.Stat(addr[7:]) - if err != nil { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go deleted file mode 100644 index 1150b093..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux darwin openbsd netbsd solaris dragonfly - -package initwd - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return st.Ino, nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go deleted file mode 100644 index 30532f54..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build freebsd - -package initwd - -import ( - "fmt" - "os" - "syscall" -) - -// lookup the inode of a file on posix systems -func inode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - if st, ok := stat.Sys().(*syscall.Stat_t); ok { - return uint64(st.Ino), nil - } - return 0, fmt.Errorf("could not determine file inode") -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go deleted file mode 100644 index 3ed58e4b..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package initwd - -// no syscall.Stat_t on windows, return 0 for inodes -func inode(path string) (uint64, error) { - return 0, nil -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go deleted file mode 100644 index 6f77dcd8..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go +++ /dev/null @@ -1,56 +0,0 @@ -package initwd - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/internal/earlyconfig" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/tfdiags" -) - -// LoadConfig loads a full configuration tree that has previously had all of -// its dependent modules installed to 
the given modulesDir using a -// ModuleInstaller. -// -// This uses the early configuration loader and thus only reads top-level -// metadata from the modules in the configuration. Most callers should use -// the configs/configload package to fully load a configuration. -func LoadConfig(rootDir, modulesDir string) (*earlyconfig.Config, tfdiags.Diagnostics) { - rootMod, diags := earlyconfig.LoadModule(rootDir) - if rootMod == nil { - return nil, diags - } - - manifest, err := modsdir.ReadManifestSnapshotForDir(modulesDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read module manifest", - fmt.Sprintf("Terraform failed to read its manifest of locally-cached modules: %s.", err), - )) - return nil, diags - } - - return earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( - func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - key := manifest.ModuleKey(req.Path) - record, exists := manifest[key] - if !exists { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not installed", - fmt.Sprintf("Module %s is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", req.Path.String()), - )) - return nil, nil, diags - } - - mod, mDiags := earlyconfig.LoadModule(record.Dir) - diags = diags.Append(mDiags) - return mod, record.Version, diags - }, - )) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go deleted file mode 100644 index 38a44c26..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go +++ /dev/null @@ -1,590 +0,0 @@ -package initwd - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform-config-inspect/tfconfig" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/internal/earlyconfig" - "github.com/hashicorp/terraform/internal/modsdir" - "github.com/hashicorp/terraform/registry" - "github.com/hashicorp/terraform/registry/regsrc" - "github.com/hashicorp/terraform/registry/response" - "github.com/hashicorp/terraform/tfdiags" -) - -type ModuleInstaller struct { - modsDir string - reg *registry.Client - - // The keys in moduleVersions are resolved and trimmed registry source - // addresses and the values are the registry response. - moduleVersions map[string]*response.ModuleVersions - - // The keys in moduleVersionsUrl are the moduleVersion struct below and - // addresses and the values are the download URLs. - moduleVersionsUrl map[moduleVersion]string -} - -type moduleVersion struct { - module string - version string -} - -func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller { - return &ModuleInstaller{ - modsDir: modsDir, - reg: reg, - moduleVersions: make(map[string]*response.ModuleVersions), - moduleVersionsUrl: make(map[moduleVersion]string), - } -} - -// InstallModules analyses the root module in the given directory and installs -// all of its direct and transitive dependencies into the given modules -// directory, which must already exist. -// -// Since InstallModules makes possibly-time-consuming calls to remote services, -// a hook interface is supported to allow the caller to be notified when -// each module is installed and, for remote modules, when downloading begins. 
-// LoadConfig guarantees that two hook calls will not happen concurrently but -// it does not guarantee any particular ordering of hook calls. This mechanism -// is for UI feedback only and does not give the caller any control over the -// process. -// -// If modules are already installed in the target directory, they will be -// skipped unless their source address or version have changed or unless -// the upgrade flag is set. -// -// InstallModules never deletes any directory, except in the case where it -// needs to replace a directory that is already present with a newly-extracted -// package. -// -// If the returned diagnostics contains errors then the module installation -// may have wholly or partially completed. Modules must be loaded in order -// to find their dependencies, so this function does many of the same checks -// as LoadConfig as a side-effect. -// -// If successful (the returned diagnostics contains no errors) then the -// first return value is the early configuration tree that was constructed by -// the installation process. -func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { - log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) - - rootMod, diags := earlyconfig.LoadModule(rootDir) - if rootMod == nil { - return nil, diags - } - - manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read modules manifest file", - fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), - )) - return nil, diags - } - - getter := reusingGetter{} - cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) - diags = append(diags, instDiags...) - - return cfg, diags -} - -func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if hooks == nil { - // Use our no-op implementation as a placeholder - hooks = ModuleInstallHooksImpl{} - } - - // Create a manifest record for the root module. This will be used if - // there are any relative-pathed modules in the root. - manifest[""] = modsdir.Record{ - Key: "", - Dir: rootDir, - } - - cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( - func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - - key := manifest.ModuleKey(req.Path) - instPath := i.packageInstallPath(req.Path) - - log.Printf("[DEBUG] Module installer: begin %s", key) - - // First we'll check if we need to upgrade/replace an existing - // installed module, and delete it out of the way if so. 
- replace := upgrade - if !replace { - record, recorded := manifest[key] - switch { - case !recorded: - log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) - replace = true - case record.SourceAddr != req.SourceAddr: - log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) - replace = true - case record.Version != nil && !req.VersionConstraints.Check(record.Version): - log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) - replace = true - } - } - - // If we _are_ planning to replace this module, then we'll remove - // it now so our installation code below won't conflict with any - // existing remnants. - if replace { - if _, recorded := manifest[key]; recorded { - log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) - } - delete(manifest, key) - // Deleting a module invalidates all of its descendent modules too. - keyPrefix := key + "." - for subKey := range manifest { - if strings.HasPrefix(subKey, keyPrefix) { - if _, recorded := manifest[subKey]; recorded { - log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) - } - delete(manifest, subKey) - } - } - } - - record, recorded := manifest[key] - if !recorded { - // Clean up any stale cache directory that might be present. - // If this is a local (relative) source then the dir will - // not exist, but we'll ignore that. - log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) - err := os.RemoveAll(instPath) - if err != nil && !os.IsNotExist(err) { - log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to remove local module cache", - fmt.Sprintf( - "Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s", - instPath, err, - ), - )) - return nil, nil, diags - } - } else { - // If this module is already recorded and its root directory - // exists then we will just load what's already there and - // keep our existing record. - info, err := os.Stat(record.Dir) - if err == nil && info.IsDir() { - mod, mDiags := earlyconfig.LoadModule(record.Dir) - diags = diags.Append(mDiags) - - log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) - return mod, record.Version, diags - } - } - - // If we get down here then it's finally time to actually install - // the module. There are some variants to this process depending - // on what type of module source address we have. - switch { - - case isLocalSourceAddr(req.SourceAddr): - log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr) - mod, mDiags := i.installLocalModule(req, key, manifest, hooks) - diags = append(diags, mDiags...) - return mod, nil, diags - - case isRegistrySourceAddr(req.SourceAddr): - addr, err := regsrc.ParseModuleSource(req.SourceAddr) - if err != nil { - // Should never happen because isRegistrySourceAddr already validated - panic(err) - } - log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr) - - mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter) - diags = append(diags, mDiags...) 
- return mod, v, diags - - default: - log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr) - - mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter) - diags = append(diags, mDiags...) - return mod, nil, diags - } - - }, - )) - diags = append(diags, cDiags...) - - err := manifest.WriteSnapshotToDir(i.modsDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to update module manifest", - fmt.Sprintf("Unable to write the module manifest file: %s", err), - )) - } - - return cfg, diags -} - -func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - parentKey := manifest.ModuleKey(req.Parent.Path) - parentRecord, recorded := manifest[parentKey] - if !recorded { - // This is indicative of a bug rather than a user-actionable error - panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) - } - - if len(req.VersionConstraints) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid version constraint", - fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } - - // For local sources we don't actually need to modify the - // filesystem at all because the parent already wrote - // the files we need, and so we just load up what's already here. - newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) - - log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) - // it is possible that the local directory is a symlink - newDir, err := filepath.EvalSymlinks(newDir) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), - )) - } - - mod, mDiags := earlyconfig.LoadModule(newDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } else { - diags = diags.Append(mDiags) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Dir: newDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) - hooks.Install(key, nil, newDir) - - return mod, diags -} - -func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - hostname, err := addr.SvcHost() - if err != nil { - // If it looks like the user was trying to use punycode then we'll generate - // a specialized error for that case. We require the unicode form of - // hostname so that hostnames are always human-readable in configuration - // and punycode can't be used to hide a malicious module hostname. 
- if strings.HasPrefix(addr.RawHost.Raw, "xn--") { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid module registry hostname", - fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid module registry hostname", - fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - } - return nil, nil, diags - } - - reg := i.reg - var resp *response.ModuleVersions - var exists bool - - // check if we've already looked up this module from the registry - if resp, exists = i.moduleVersions[addr.String()]; exists { - log.Printf("[TRACE] %s using already found available versions of %s at %s", key, addr, hostname) - } else { - log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) - resp, err = reg.ModuleVersions(addr) - if err != nil { - if registry.IsModuleNotFound(err) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not found", - fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error accessing remote module registry", - fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), - )) - } - return nil, nil, diags - } - i.moduleVersions[addr.String()] = resp - } - - // The response might contain information about dependencies to allow us - // to potentially optimize future requests, but we don't currently do that - // and so for now we'll just take the first item which is guaranteed to - // be the address we requested. - if len(resp.Modules) < 1 { - // Should never happen, but since this is a remote service that may - // be implemented by third-parties we will handle it gracefully. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid response from remote module registry", - fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - return nil, nil, diags - } - - modMeta := resp.Modules[0] - - var latestMatch *version.Version - var latestVersion *version.Version - for _, mv := range modMeta.Versions { - v, err := version.NewVersion(mv.Version) - if err != nil { - // Should never happen if the registry server is compliant with - // the protocol, but we'll warn if not to assist someone who - // might be developing a module registry server. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Invalid response from remote module registry", - fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - continue - } - - // If we've found a pre-release version then we'll ignore it unless - // it was exactly requested. 
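The selection loop that continues below skips pre-release versions unless they were requested exactly and keeps track of the newest version that satisfies the constraint. A simplified, self-contained sketch of the same selection using hashicorp/go-version (the version list and constraint are invented):

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	available := []string{"1.2.0", "1.3.5", "2.0.0-beta1", "1.4.1"}
	constraints, _ := version.NewConstraint(">= 1.3.0, < 2.0.0")

	var latestMatch *version.Version
	for _, s := range available {
		v, err := version.NewVersion(s)
		if err != nil {
			continue // the removed code emits a warning diagnostic and moves on
		}
		if v.Prerelease() != "" {
			continue // pre-releases are ignored unless requested exactly
		}
		if constraints.Check(v) && (latestMatch == nil || v.GreaterThan(latestMatch)) {
			latestMatch = v
		}
	}

	fmt.Println(latestMatch) // 1.4.1
}
```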
- if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { - log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) - continue - } - - if latestVersion == nil || v.GreaterThan(latestVersion) { - latestVersion = v - } - - if req.VersionConstraints.Check(v) { - if latestMatch == nil || v.GreaterThan(latestMatch) { - latestMatch = v - } - } - } - - if latestVersion == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module has no versions", - fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), - )) - return nil, nil, diags - } - - if latestMatch == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unresolvable module version constraint", - fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), - )) - return nil, nil, diags - } - - // Report up to the caller that we're about to start downloading. - packageAddr, _ := splitAddrSubdir(req.SourceAddr) - hooks.Download(key, packageAddr, latestMatch) - - // If we manage to get down here then we've found a suitable version to - // install, so we need to ask the registry where we should download it from. - // The response to this is a go-getter-style address string. - - // first check the cache for the download URL - moduleAddr := moduleVersion{module: addr.String(), version: latestMatch.String()} - if _, exists := i.moduleVersionsUrl[moduleAddr]; !exists { - url, err := reg.ModuleLocation(addr, latestMatch.String()) - if err != nil { - log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Error accessing remote module registry", - fmt.Sprintf("Failed to retrieve a download URL for %s %s from %s: %s", addr, latestMatch, hostname, err), - )) - return nil, nil, diags - } - i.moduleVersionsUrl[moduleVersion{module: addr.String(), version: latestMatch.String()}] = url - } - - dlAddr := i.moduleVersionsUrl[moduleAddr] - - log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) - - modDir, err := getter.getWithGoGetter(instPath, dlAddr) - if err != nil { - // Errors returned by go-getter have very inconsistent quality as - // end-user error messages, but for now we're accepting that because - // we have no way to recognize any specific errors to improve them - // and masking the error entirely would hide valuable diagnostic - // information from the user. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to download module", - fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), - )) - return nil, nil, diags - } - - log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) - - if addr.RawSubmodule != "" { - // Append the user's requested subdirectory to any subdirectory that - // was implied by any of the nested layers we expanded within go-getter. - modDir = filepath.Join(modDir, addr.RawSubmodule) - } - - log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) - - // Finally we are ready to try actually loading the module. 
- mod, mDiags := earlyconfig.LoadModule(modDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. For registry modules this actually - // indicates a bug in the code above, since it's not the - // user's responsibility to create the directory in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), - )) - } else { - diags = append(diags, mDiags...) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Version: latestMatch, - Dir: modDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) - hooks.Install(key, latestMatch, modDir) - - return mod, latestMatch, diags -} - -func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Report up to the caller that we're about to start downloading. - packageAddr, _ := splitAddrSubdir(req.SourceAddr) - hooks.Download(key, packageAddr, nil) - - if len(req.VersionConstraints) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid version constraint", - fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line), - )) - return nil, diags - } - - modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr) - if err != nil { - if _, ok := err.(*MaybeRelativePathErr); ok { - log.Printf( - "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", - req.SourceAddr, - ) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Module not found", - fmt.Sprintf( - "The module address %q could not be resolved.\n\n"+ - "If you intended this as a path relative to the current "+ - "module, use \"./%s\" instead. The \"./\" prefix "+ - "indicates that the address is a relative filesystem path.", - req.SourceAddr, req.SourceAddr, - ), - )) - } else { - // Errors returned by go-getter have very inconsistent quality as - // end-user error messages, but for now we're accepting that because - // we have no way to recognize any specific errors to improve them - // and masking the error entirely would hide valuable diagnostic - // information from the user. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to download module", - fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err), - )) - } - return nil, diags - - } - - log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir) - - mod, mDiags := earlyconfig.LoadModule(modDir) - if mod == nil { - // nil indicates missing or unreadable directory, so we'll - // discard the returned diags and return a more specific - // error message here. For go-getter modules this actually - // indicates a bug in the code above, since it's not the - // user's responsibility to create the directory in this case. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unreadable module directory", - fmt.Sprintf("The directory %s could not be read. 
This is a bug in Terraform and should be reported.", modDir), - )) - } else { - diags = append(diags, mDiags...) - } - - // Note the local location in our manifest. - manifest[key] = modsdir.Record{ - Key: key, - Dir: modDir, - SourceAddr: req.SourceAddr, - } - log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) - hooks.Install(key, nil, modDir) - - return mod, diags -} - -func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string { - return filepath.Join(i.modsDir, strings.Join(modulePath, ".")) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go deleted file mode 100644 index 817a6dc8..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go +++ /dev/null @@ -1,36 +0,0 @@ -package initwd - -import ( - version "github.com/hashicorp/go-version" -) - -// ModuleInstallHooks is an interface used to provide notifications about the -// installation process being orchestrated by InstallModules. -// -// This interface may have new methods added in future, so implementers should -// embed InstallHooksImpl to get no-op implementations of any unimplemented -// methods. -type ModuleInstallHooks interface { - // Download is called for modules that are retrieved from a remote source - // before that download begins, to allow a caller to give feedback - // on progress through a possibly-long sequence of downloads. - Download(moduleAddr, packageAddr string, version *version.Version) - - // Install is called for each module that is installed, even if it did - // not need to be downloaded from a remote source. - Install(moduleAddr string, version *version.Version, localPath string) -} - -// ModuleInstallHooksImpl is a do-nothing implementation of InstallHooks that -// can be embedded in another implementation struct to allow only partial -// implementation of the interface. -type ModuleInstallHooksImpl struct { -} - -func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) { -} - -func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) { -} - -var _ ModuleInstallHooks = ModuleInstallHooksImpl{} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go deleted file mode 100644 index 8cef80a3..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go +++ /dev/null @@ -1,73 +0,0 @@ -package initwd - -import ( - "github.com/hashicorp/terraform/registry" - "testing" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configload" - "github.com/hashicorp/terraform/tfdiags" -) - -// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests, -// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows -// a test configuration to be loaded in a single step. -// -// If module installation fails, t.Fatal (or similar) is called to halt -// execution of the test, under the assumption that installation failures are -// not expected. If installation failures _are_ expected then use -// NewLoaderForTests and work with the loader object directly. If module -// installation succeeds but generates warnings, these warnings are discarded. 
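The ModuleInstallHooks interface removed above relies on a common Go pattern: a do-nothing struct (ModuleInstallHooksImpl) is embedded by implementers so that interface methods added later do not break them. A self-contained sketch of that embedding pattern, using locally defined stand-ins rather than the vendored types:

```go
package main

import "fmt"

// InstallHooks mirrors the shape of the removed ModuleInstallHooks interface.
type InstallHooks interface {
	Download(moduleAddr, packageAddr string)
	Install(moduleAddr, localPath string)
}

// InstallHooksImpl is the no-op implementation intended for embedding, so an
// implementer only overrides the notifications it cares about.
type InstallHooksImpl struct{}

func (InstallHooksImpl) Download(moduleAddr, packageAddr string) {}
func (InstallHooksImpl) Install(moduleAddr, localPath string)    {}

// downloadLogger reports downloads; Install falls through to the embedded no-op.
type downloadLogger struct {
	InstallHooksImpl
}

func (downloadLogger) Download(moduleAddr, packageAddr string) {
	fmt.Printf("downloading %s from %s\n", moduleAddr, packageAddr)
}

var _ InstallHooks = downloadLogger{}

func main() {
	var h InstallHooks = downloadLogger{}
	h.Download("child.vpc", "github.com/example/terraform-modules")
	h.Install("child.vpc", ".terraform/modules/child.vpc") // no-op via embedding
}
```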
-// -// If installation succeeds but errors are detected during loading then a -// possibly-incomplete config is returned along with error diagnostics. The -// test run is not aborted in this case, so that the caller can make assertions -// against the returned diagnostics. -// -// As with NewLoaderForTests, a cleanup function is returned which must be -// called before the test completes in order to remove the temporary -// modules directory. -func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) { - t.Helper() - - var diags tfdiags.Diagnostics - - loader, cleanup := configload.NewLoaderForTests(t) - inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil)) - - _, moreDiags := inst.InstallModules(rootDir, true, ModuleInstallHooksImpl{}) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - cleanup() - t.Fatal(diags.Err()) - return nil, nil, func() {}, diags - } - - // Since module installer has modified the module manifest on disk, we need - // to refresh the cache of it in the loader. - if err := loader.RefreshModules(); err != nil { - t.Fatalf("failed to refresh modules after installation: %s", err) - } - - config, hclDiags := loader.LoadConfig(rootDir) - diags = diags.Append(hclDiags) - return config, loader, cleanup, diags -} - -// MustLoadConfigForTests is a variant of LoadConfigForTests which calls -// t.Fatal (or similar) if there are any errors during loading, and thus -// does not return diagnostics at all. -// -// This is useful for concisely writing tests that don't expect errors at -// all. For tests that expect errors and need to assert against them, use -// LoadConfigForTests instead. -func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) { - t.Helper() - - config, loader, cleanup, diags := LoadConfigForTests(t, rootDir) - if diags.HasErrors() { - cleanup() - t.Fatal(diags.Err()) - } - return config, loader, cleanup -} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go deleted file mode 100644 index 104840b9..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go +++ /dev/null @@ -1,83 +0,0 @@ -package initwd - -import ( - "fmt" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/internal/earlyconfig" - "github.com/hashicorp/terraform/tfdiags" - tfversion "github.com/hashicorp/terraform/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// early configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. 
-func CheckCoreVersionRequirements(earlyConfig *earlyconfig.Config) tfdiags.Diagnostics { - if earlyConfig == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := earlyConfig.Module - - var constraints version.Constraints - for _, constraintStr := range module.RequiredCore { - constraint, err := version.NewConstraint(constraintStr) - if err != nil { - // Unfortunately the early config parser doesn't preserve a source - // location for this, so we're unable to indicate a specific - // location where this constraint came from, but we can at least - // say which module set it. - switch { - case len(earlyConfig.Path) == 0: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider version constraint", - fmt.Sprintf("Invalid version core constraint %q in the root module.", constraintStr), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid provider version constraint", - fmt.Sprintf("Invalid version core constraint %q in %s.", constraintStr, earlyConfig.Path), - )) - } - continue - } - constraints = append(constraints, constraint...) - } - - if !constraints.Check(tfversion.SemVer) { - switch { - case len(earlyConfig.Path) == 0: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported Terraform Core version", - fmt.Sprintf( - "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the root module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported Terraform Core version", - fmt.Sprintf( - "Module %s (from %q) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - earlyConfig.Path, earlyConfig.SourceAddr, tfversion.String(), - ), - )) - } - } - - for _, c := range earlyConfig.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go deleted file mode 100644 index 0d7d664f..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package modsdir is an internal package containing the model types used to -// represent the manifest of modules in a local modules cache directory. -package modsdir diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go deleted file mode 100644 index 92b40899..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go +++ /dev/null @@ -1,147 +0,0 @@ -package modsdir - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/addrs" -) - -// Record represents some metadata about an installed module, as part -// of a ModuleManifest. -type Record struct { - // Key is a unique identifier for this particular module, based on its - // position within the static module tree. 
- Key string `json:"Key"` - - // SourceAddr is the source address given for this module in configuration. - // This is used only to detect if the source was changed in configuration - // since the module was last installed, which means that the installer - // must re-install it. - SourceAddr string `json:"Source"` - - // Version is the exact version of the module, which results from parsing - // VersionStr. nil for un-versioned modules. - Version *version.Version `json:"-"` - - // VersionStr is the version specifier string. This is used only for - // serialization in snapshots and should not be accessed or updated - // by any other codepaths; use "Version" instead. - VersionStr string `json:"Version,omitempty"` - - // Dir is the path to the local directory where the module is installed. - Dir string `json:"Dir"` -} - -// Manifest is a map used to keep track of the filesystem locations -// and other metadata about installed modules. -// -// The configuration loader refers to this, while the module installer updates -// it to reflect any changes to the installed modules. -type Manifest map[string]Record - -func (m Manifest) ModuleKey(path addrs.Module) string { - return path.String() -} - -// manifestSnapshotFile is an internal struct used only to assist in our JSON -// serialization of manifest snapshots. It should not be used for any other -// purpose. -type manifestSnapshotFile struct { - Records []Record `json:"Modules"` -} - -func ReadManifestSnapshot(r io.Reader) (Manifest, error) { - src, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - if len(src) == 0 { - // This should never happen, but we'll tolerate it as if it were - // a valid empty JSON object. - return make(Manifest), nil - } - - var read manifestSnapshotFile - err = json.Unmarshal(src, &read) - - new := make(Manifest) - for _, record := range read.Records { - if record.VersionStr != "" { - record.Version, err = version.NewVersion(record.VersionStr) - if err != nil { - return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err) - } - } - - // Ensure Windows is using the proper modules path format after - // reading the modules manifest Dir records - record.Dir = filepath.FromSlash(record.Dir) - - if _, exists := new[record.Key]; exists { - // This should never happen in any valid file, so we'll catch it - // and report it to avoid confusing/undefined behavior if the - // snapshot file was edited incorrectly outside of Terraform. - return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key) - } - new[record.Key] = record - } - return new, nil -} - -func ReadManifestSnapshotForDir(dir string) (Manifest, error) { - fn := filepath.Join(dir, ManifestSnapshotFilename) - r, err := os.Open(fn) - if err != nil { - if os.IsNotExist(err) { - return make(Manifest), nil // missing file is okay and treated as empty - } - return nil, err - } - return ReadManifestSnapshot(r) -} - -func (m Manifest) WriteSnapshot(w io.Writer) error { - var write manifestSnapshotFile - - for _, record := range m { - // Make sure VersionStr is in sync with Version, since we encourage - // callers to manipulate Version and ignore VersionStr. 
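The Record and manifestSnapshotFile types above define the JSON shape of the modules manifest: a top-level "Modules" array whose entries carry Key, Source, Version, and Dir. A small sketch of what that snapshot looks like on disk, built with stand-in structs and invented record values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// record mirrors the JSON tags of the removed modsdir.Record type.
type record struct {
	Key     string `json:"Key"`
	Source  string `json:"Source"`
	Version string `json:"Version,omitempty"`
	Dir     string `json:"Dir"`
}

// snapshot mirrors the removed manifestSnapshotFile wrapper: {"Modules": [...]}.
type snapshot struct {
	Records []record `json:"Modules"`
}

func main() {
	snap := snapshot{Records: []record{
		{Key: "", Dir: "."}, // the root module itself
		{
			Key:     "child.vpc",
			Source:  "github.com/example/terraform-modules//vpc", // hypothetical source
			Version: "1.4.1",
			Dir:     ".terraform/modules/child.vpc",
		},
	}}

	out, err := json.MarshalIndent(snap, "", "  ")
	if err != nil {
		panic(err)
	}
	// Roughly the content written to <modules dir>/modules.json by WriteSnapshotToDir.
	fmt.Println(string(out))
}
```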
- if record.Version != nil { - record.VersionStr = record.Version.String() - } else { - record.VersionStr = "" - } - - // Ensure Dir is written in a format that can be read by Linux and - // Windows nodes for remote and apply compatibility - record.Dir = filepath.ToSlash(record.Dir) - write.Records = append(write.Records, record) - } - - src, err := json.Marshal(write) - if err != nil { - return err - } - - _, err = w.Write(src) - return err -} - -func (m Manifest) WriteSnapshotToDir(dir string) error { - fn := filepath.Join(dir, ManifestSnapshotFilename) - log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn) - w, err := os.Create(fn) - if err != nil { - return err - } - return m.WriteSnapshot(w) -} diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go deleted file mode 100644 index 9ebb5243..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go +++ /dev/null @@ -1,3 +0,0 @@ -package modsdir - -const ManifestSnapshotFilename = "modules.json" diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go deleted file mode 100644 index 86fd21e4..00000000 --- a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go +++ /dev/null @@ -1,3518 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: tfplugin5.proto - -package tfplugin5 - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Diagnostic_Severity int32 - -const ( - Diagnostic_INVALID Diagnostic_Severity = 0 - Diagnostic_ERROR Diagnostic_Severity = 1 - Diagnostic_WARNING Diagnostic_Severity = 2 -) - -var Diagnostic_Severity_name = map[int32]string{ - 0: "INVALID", - 1: "ERROR", - 2: "WARNING", -} - -var Diagnostic_Severity_value = map[string]int32{ - "INVALID": 0, - "ERROR": 1, - "WARNING": 2, -} - -func (x Diagnostic_Severity) String() string { - return proto.EnumName(Diagnostic_Severity_name, int32(x)) -} - -func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{1, 0} -} - -type Schema_NestedBlock_NestingMode int32 - -const ( - Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 - Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 - Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 - Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 - Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 - Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 -) - -var Schema_NestedBlock_NestingMode_name = map[int32]string{ - 0: "INVALID", - 1: "SINGLE", - 2: "LIST", - 3: "SET", - 4: "MAP", - 5: "GROUP", -} - -var Schema_NestedBlock_NestingMode_value = map[string]int32{ - "INVALID": 0, - "SINGLE": 1, - "LIST": 2, - "SET": 3, - "MAP": 4, - "GROUP": 5, -} - -func (x Schema_NestedBlock_NestingMode) String() string { - return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) -} - -func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} -} - -// DynamicValue is an opaque encoding of terraform data, with the field name -// indicating the encoding scheme used. 
-type DynamicValue struct { - Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` - Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DynamicValue) Reset() { *m = DynamicValue{} } -func (m *DynamicValue) String() string { return proto.CompactTextString(m) } -func (*DynamicValue) ProtoMessage() {} -func (*DynamicValue) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{0} -} - -func (m *DynamicValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DynamicValue.Unmarshal(m, b) -} -func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) -} -func (m *DynamicValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DynamicValue.Merge(m, src) -} -func (m *DynamicValue) XXX_Size() int { - return xxx_messageInfo_DynamicValue.Size(m) -} -func (m *DynamicValue) XXX_DiscardUnknown() { - xxx_messageInfo_DynamicValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DynamicValue proto.InternalMessageInfo - -func (m *DynamicValue) GetMsgpack() []byte { - if m != nil { - return m.Msgpack - } - return nil -} - -func (m *DynamicValue) GetJson() []byte { - if m != nil { - return m.Json - } - return nil -} - -type Diagnostic struct { - Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` - Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Diagnostic) Reset() { *m = Diagnostic{} } -func (m *Diagnostic) String() string { return proto.CompactTextString(m) } -func (*Diagnostic) ProtoMessage() {} -func (*Diagnostic) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{1} -} - -func (m *Diagnostic) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Diagnostic.Unmarshal(m, b) -} -func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) -} -func (m *Diagnostic) XXX_Merge(src proto.Message) { - xxx_messageInfo_Diagnostic.Merge(m, src) -} -func (m *Diagnostic) XXX_Size() int { - return xxx_messageInfo_Diagnostic.Size(m) -} -func (m *Diagnostic) XXX_DiscardUnknown() { - xxx_messageInfo_Diagnostic.DiscardUnknown(m) -} - -var xxx_messageInfo_Diagnostic proto.InternalMessageInfo - -func (m *Diagnostic) GetSeverity() Diagnostic_Severity { - if m != nil { - return m.Severity - } - return Diagnostic_INVALID -} - -func (m *Diagnostic) GetSummary() string { - if m != nil { - return m.Summary - } - return "" -} - -func (m *Diagnostic) GetDetail() string { - if m != nil { - return m.Detail - } - return "" -} - -func (m *Diagnostic) GetAttribute() *AttributePath { - if m != nil { - return m.Attribute - } - return nil -} - -type AttributePath struct { - Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*AttributePath) Reset() { *m = AttributePath{} } -func (m *AttributePath) String() string { return proto.CompactTextString(m) } -func (*AttributePath) ProtoMessage() {} -func (*AttributePath) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{2} -} - -func (m *AttributePath) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributePath.Unmarshal(m, b) -} -func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) -} -func (m *AttributePath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributePath.Merge(m, src) -} -func (m *AttributePath) XXX_Size() int { - return xxx_messageInfo_AttributePath.Size(m) -} -func (m *AttributePath) XXX_DiscardUnknown() { - xxx_messageInfo_AttributePath.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributePath proto.InternalMessageInfo - -func (m *AttributePath) GetSteps() []*AttributePath_Step { - if m != nil { - return m.Steps - } - return nil -} - -type AttributePath_Step struct { - // Types that are valid to be assigned to Selector: - // *AttributePath_Step_AttributeName - // *AttributePath_Step_ElementKeyString - // *AttributePath_Step_ElementKeyInt - Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } -func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } -func (*AttributePath_Step) ProtoMessage() {} -func (*AttributePath_Step) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{2, 0} -} - -func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) -} -func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) -} -func (m *AttributePath_Step) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributePath_Step.Merge(m, src) -} -func (m *AttributePath_Step) XXX_Size() int { - return xxx_messageInfo_AttributePath_Step.Size(m) -} -func (m *AttributePath_Step) XXX_DiscardUnknown() { - xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo - -type isAttributePath_Step_Selector interface { - isAttributePath_Step_Selector() -} - -type AttributePath_Step_AttributeName struct { - AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` -} - -type AttributePath_Step_ElementKeyString struct { - ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` -} - -type AttributePath_Step_ElementKeyInt struct { - ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` -} - -func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} - -func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} - -func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} - -func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { - if m != nil { - return m.Selector - } - return nil -} - -func (m *AttributePath_Step) GetAttributeName() string { - if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { - return x.AttributeName - } - return "" -} - -func (m 
*AttributePath_Step) GetElementKeyString() string { - if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { - return x.ElementKeyString - } - return "" -} - -func (m *AttributePath_Step) GetElementKeyInt() int64 { - if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { - return x.ElementKeyInt - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AttributePath_Step) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AttributePath_Step_AttributeName)(nil), - (*AttributePath_Step_ElementKeyString)(nil), - (*AttributePath_Step_ElementKeyInt)(nil), - } -} - -type Stop struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stop) Reset() { *m = Stop{} } -func (m *Stop) String() string { return proto.CompactTextString(m) } -func (*Stop) ProtoMessage() {} -func (*Stop) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{3} -} - -func (m *Stop) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stop.Unmarshal(m, b) -} -func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stop.Marshal(b, m, deterministic) -} -func (m *Stop) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop.Merge(m, src) -} -func (m *Stop) XXX_Size() int { - return xxx_messageInfo_Stop.Size(m) -} -func (m *Stop) XXX_DiscardUnknown() { - xxx_messageInfo_Stop.DiscardUnknown(m) -} - -var xxx_messageInfo_Stop proto.InternalMessageInfo - -type Stop_Request struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stop_Request) Reset() { *m = Stop_Request{} } -func (m *Stop_Request) String() string { return proto.CompactTextString(m) } -func (*Stop_Request) ProtoMessage() {} -func (*Stop_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{3, 0} -} - -func (m *Stop_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stop_Request.Unmarshal(m, b) -} -func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) -} -func (m *Stop_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop_Request.Merge(m, src) -} -func (m *Stop_Request) XXX_Size() int { - return xxx_messageInfo_Stop_Request.Size(m) -} -func (m *Stop_Request) XXX_DiscardUnknown() { - xxx_messageInfo_Stop_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Stop_Request proto.InternalMessageInfo - -type Stop_Response struct { - Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stop_Response) Reset() { *m = Stop_Response{} } -func (m *Stop_Response) String() string { return proto.CompactTextString(m) } -func (*Stop_Response) ProtoMessage() {} -func (*Stop_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{3, 1} -} - -func (m *Stop_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stop_Response.Unmarshal(m, b) -} -func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) -} -func (m *Stop_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop_Response.Merge(m, src) -} -func (m *Stop_Response) XXX_Size() 
int { - return xxx_messageInfo_Stop_Response.Size(m) -} -func (m *Stop_Response) XXX_DiscardUnknown() { - xxx_messageInfo_Stop_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Stop_Response proto.InternalMessageInfo - -func (m *Stop_Response) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -// RawState holds the stored state for a resource to be upgraded by the -// provider. It can be in one of two formats, the current json encoded format -// in bytes, or the legacy flatmap format as a map of strings. -type RawState struct { - Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` - Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RawState) Reset() { *m = RawState{} } -func (m *RawState) String() string { return proto.CompactTextString(m) } -func (*RawState) ProtoMessage() {} -func (*RawState) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{4} -} - -func (m *RawState) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RawState.Unmarshal(m, b) -} -func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RawState.Marshal(b, m, deterministic) -} -func (m *RawState) XXX_Merge(src proto.Message) { - xxx_messageInfo_RawState.Merge(m, src) -} -func (m *RawState) XXX_Size() int { - return xxx_messageInfo_RawState.Size(m) -} -func (m *RawState) XXX_DiscardUnknown() { - xxx_messageInfo_RawState.DiscardUnknown(m) -} - -var xxx_messageInfo_RawState proto.InternalMessageInfo - -func (m *RawState) GetJson() []byte { - if m != nil { - return m.Json - } - return nil -} - -func (m *RawState) GetFlatmap() map[string]string { - if m != nil { - return m.Flatmap - } - return nil -} - -// Schema is the configuration schema for a Resource, Provider, or Provisioner. -type Schema struct { - // The version of the schema. - // Schemas are versioned, so that providers can upgrade a saved resource - // state when the schema is changed. - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Block is the top level configuration block for this schema. 
- Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{5} -} - -func (m *Schema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema.Unmarshal(m, b) -} -func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema.Marshal(b, m, deterministic) -} -func (m *Schema) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema.Merge(m, src) -} -func (m *Schema) XXX_Size() int { - return xxx_messageInfo_Schema.Size(m) -} -func (m *Schema) XXX_DiscardUnknown() { - xxx_messageInfo_Schema.DiscardUnknown(m) -} - -var xxx_messageInfo_Schema proto.InternalMessageInfo - -func (m *Schema) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *Schema) GetBlock() *Schema_Block { - if m != nil { - return m.Block - } - return nil -} - -type Schema_Block struct { - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` - BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Schema_Block) Reset() { *m = Schema_Block{} } -func (m *Schema_Block) String() string { return proto.CompactTextString(m) } -func (*Schema_Block) ProtoMessage() {} -func (*Schema_Block) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{5, 0} -} - -func (m *Schema_Block) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema_Block.Unmarshal(m, b) -} -func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) -} -func (m *Schema_Block) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema_Block.Merge(m, src) -} -func (m *Schema_Block) XXX_Size() int { - return xxx_messageInfo_Schema_Block.Size(m) -} -func (m *Schema_Block) XXX_DiscardUnknown() { - xxx_messageInfo_Schema_Block.DiscardUnknown(m) -} - -var xxx_messageInfo_Schema_Block proto.InternalMessageInfo - -func (m *Schema_Block) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *Schema_Block) GetAttributes() []*Schema_Attribute { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { - if m != nil { - return m.BlockTypes - } - return nil -} - -type Schema_Attribute struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` - Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` - Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" 
json:"sensitive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } -func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } -func (*Schema_Attribute) ProtoMessage() {} -func (*Schema_Attribute) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{5, 1} -} - -func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) -} -func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) -} -func (m *Schema_Attribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema_Attribute.Merge(m, src) -} -func (m *Schema_Attribute) XXX_Size() int { - return xxx_messageInfo_Schema_Attribute.Size(m) -} -func (m *Schema_Attribute) XXX_DiscardUnknown() { - xxx_messageInfo_Schema_Attribute.DiscardUnknown(m) -} - -var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo - -func (m *Schema_Attribute) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Schema_Attribute) GetType() []byte { - if m != nil { - return m.Type - } - return nil -} - -func (m *Schema_Attribute) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Schema_Attribute) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *Schema_Attribute) GetOptional() bool { - if m != nil { - return m.Optional - } - return false -} - -func (m *Schema_Attribute) GetComputed() bool { - if m != nil { - return m.Computed - } - return false -} - -func (m *Schema_Attribute) GetSensitive() bool { - if m != nil { - return m.Sensitive - } - return false -} - -type Schema_NestedBlock struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` - Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` - MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } -func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } -func (*Schema_NestedBlock) ProtoMessage() {} -func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{5, 2} -} - -func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) -} -func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) -} -func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema_NestedBlock.Merge(m, src) -} -func (m *Schema_NestedBlock) XXX_Size() int { - return xxx_messageInfo_Schema_NestedBlock.Size(m) -} -func (m *Schema_NestedBlock) XXX_DiscardUnknown() { - xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m) -} - -var 
xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo - -func (m *Schema_NestedBlock) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *Schema_NestedBlock) GetBlock() *Schema_Block { - if m != nil { - return m.Block - } - return nil -} - -func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { - if m != nil { - return m.Nesting - } - return Schema_NestedBlock_INVALID -} - -func (m *Schema_NestedBlock) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Schema_NestedBlock) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -type GetProviderSchema struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } -func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } -func (*GetProviderSchema) ProtoMessage() {} -func (*GetProviderSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{6} -} - -func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) -} -func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) -} -func (m *GetProviderSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProviderSchema.Merge(m, src) -} -func (m *GetProviderSchema) XXX_Size() int { - return xxx_messageInfo_GetProviderSchema.Size(m) -} -func (m *GetProviderSchema) XXX_DiscardUnknown() { - xxx_messageInfo_GetProviderSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo - -type GetProviderSchema_Request struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} } -func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } -func (*GetProviderSchema_Request) ProtoMessage() {} -func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{6, 0} -} - -func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) -} -func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) -} -func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProviderSchema_Request.Merge(m, src) -} -func (m *GetProviderSchema_Request) XXX_Size() int { - return xxx_messageInfo_GetProviderSchema_Request.Size(m) -} -func (m *GetProviderSchema_Request) XXX_DiscardUnknown() { - xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo - -type GetProviderSchema_Response struct { - Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` - ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" 
json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} } -func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } -func (*GetProviderSchema_Response) ProtoMessage() {} -func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{6, 1} -} - -func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) -} -func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) -} -func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProviderSchema_Response.Merge(m, src) -} -func (m *GetProviderSchema_Response) XXX_Size() int { - return xxx_messageInfo_GetProviderSchema_Response.Size(m) -} -func (m *GetProviderSchema_Response) XXX_DiscardUnknown() { - xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo - -func (m *GetProviderSchema_Response) GetProvider() *Schema { - if m != nil { - return m.Provider - } - return nil -} - -func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { - if m != nil { - return m.ResourceSchemas - } - return nil -} - -func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { - if m != nil { - return m.DataSourceSchemas - } - return nil -} - -func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type PrepareProviderConfig struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } -func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } -func (*PrepareProviderConfig) ProtoMessage() {} -func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{7} -} - -func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) -} -func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) -} -func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareProviderConfig.Merge(m, src) -} -func (m *PrepareProviderConfig) XXX_Size() int { - return xxx_messageInfo_PrepareProviderConfig.Size(m) -} -func (m *PrepareProviderConfig) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo - -type PrepareProviderConfig_Request struct { - Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PrepareProviderConfig_Request) Reset() { *m = 
PrepareProviderConfig_Request{} } -func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } -func (*PrepareProviderConfig_Request) ProtoMessage() {} -func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{7, 0} -} - -func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) -} -func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) -} -func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src) -} -func (m *PrepareProviderConfig_Request) XXX_Size() int { - return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) -} -func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo - -func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -type PrepareProviderConfig_Response struct { - PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} } -func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } -func (*PrepareProviderConfig_Response) ProtoMessage() {} -func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{7, 1} -} - -func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) -} -func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) -} -func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src) -} -func (m *PrepareProviderConfig_Response) XXX_Size() int { - return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) -} -func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo - -func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { - if m != nil { - return m.PreparedConfig - } - return nil -} - -func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type UpgradeResourceState struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } -func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } -func (*UpgradeResourceState) ProtoMessage() {} -func (*UpgradeResourceState) Descriptor() ([]byte, []int) { - 
return fileDescriptor_17ae6090ff270234, []int{8} -} - -func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) -} -func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) -} -func (m *UpgradeResourceState) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpgradeResourceState.Merge(m, src) -} -func (m *UpgradeResourceState) XXX_Size() int { - return xxx_messageInfo_UpgradeResourceState.Size(m) -} -func (m *UpgradeResourceState) XXX_DiscardUnknown() { - xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m) -} - -var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo - -type UpgradeResourceState_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - // version is the schema_version number recorded in the state file - Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - // raw_state is the raw states as stored for the resource. Core does - // not have access to the schema of prior_version, so it's the - // provider's responsibility to interpret this value using the - // appropriate older schema. The raw_state will be the json encoded - // state, or a legacy flat-mapped format. - RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} } -func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } -func (*UpgradeResourceState_Request) ProtoMessage() {} -func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{8, 0} -} - -func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) -} -func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) -} -func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src) -} -func (m *UpgradeResourceState_Request) XXX_Size() int { - return xxx_messageInfo_UpgradeResourceState_Request.Size(m) -} -func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() { - xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo - -func (m *UpgradeResourceState_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *UpgradeResourceState_Request) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *UpgradeResourceState_Request) GetRawState() *RawState { - if m != nil { - return m.RawState - } - return nil -} - -type UpgradeResourceState_Response struct { - // new_state is a msgpack-encoded data structure that, when interpreted with - // the _current_ schema for this resource type, is functionally equivalent to - // that which was given in prior_state_raw. 
- UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` - // diagnostics describes any errors encountered during migration that could not - // be safely resolved, and warnings about any possibly-risky assumptions made - // in the upgrade process. - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} } -func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } -func (*UpgradeResourceState_Response) ProtoMessage() {} -func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{8, 1} -} - -func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) -} -func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) -} -func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src) -} -func (m *UpgradeResourceState_Response) XXX_Size() int { - return xxx_messageInfo_UpgradeResourceState_Response.Size(m) -} -func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() { - xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo - -func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { - if m != nil { - return m.UpgradedState - } - return nil -} - -func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ValidateResourceTypeConfig struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} } -func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } -func (*ValidateResourceTypeConfig) ProtoMessage() {} -func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{9} -} - -func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) -} -func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) -} -func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src) -} -func (m *ValidateResourceTypeConfig) XXX_Size() int { - return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) -} -func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo - -type ValidateResourceTypeConfig_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} } -func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } -func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} -func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{9, 0} -} - -func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) -} -func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) -} -func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src) -} -func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { - return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) -} -func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo - -func (m *ValidateResourceTypeConfig_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -type ValidateResourceTypeConfig_Response struct { - Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} } -func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } -func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} -func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{9, 1} -} - -func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) -} -func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) -} -func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src) -} -func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { - return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) -} -func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo - -func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ValidateDataSourceConfig struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateDataSourceConfig) Reset() { *m = 
ValidateDataSourceConfig{} } -func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } -func (*ValidateDataSourceConfig) ProtoMessage() {} -func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{10} -} - -func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) -} -func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) -} -func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src) -} -func (m *ValidateDataSourceConfig) XXX_Size() int { - return xxx_messageInfo_ValidateDataSourceConfig.Size(m) -} -func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo - -type ValidateDataSourceConfig_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} } -func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } -func (*ValidateDataSourceConfig_Request) ProtoMessage() {} -func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{10, 0} -} - -func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) -} -func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) -} -func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src) -} -func (m *ValidateDataSourceConfig_Request) XXX_Size() int { - return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) -} -func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo - -func (m *ValidateDataSourceConfig_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -type ValidateDataSourceConfig_Response struct { - Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} } -func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } -func (*ValidateDataSourceConfig_Response) ProtoMessage() {} -func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{10, 1} 
-} - -func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) -} -func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) -} -func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src) -} -func (m *ValidateDataSourceConfig_Response) XXX_Size() int { - return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) -} -func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo - -func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type Configure struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Configure) Reset() { *m = Configure{} } -func (m *Configure) String() string { return proto.CompactTextString(m) } -func (*Configure) ProtoMessage() {} -func (*Configure) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{11} -} - -func (m *Configure) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Configure.Unmarshal(m, b) -} -func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Configure.Marshal(b, m, deterministic) -} -func (m *Configure) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configure.Merge(m, src) -} -func (m *Configure) XXX_Size() int { - return xxx_messageInfo_Configure.Size(m) -} -func (m *Configure) XXX_DiscardUnknown() { - xxx_messageInfo_Configure.DiscardUnknown(m) -} - -var xxx_messageInfo_Configure proto.InternalMessageInfo - -type Configure_Request struct { - TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Configure_Request) Reset() { *m = Configure_Request{} } -func (m *Configure_Request) String() string { return proto.CompactTextString(m) } -func (*Configure_Request) ProtoMessage() {} -func (*Configure_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{11, 0} -} - -func (m *Configure_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Configure_Request.Unmarshal(m, b) -} -func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) -} -func (m *Configure_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configure_Request.Merge(m, src) -} -func (m *Configure_Request) XXX_Size() int { - return xxx_messageInfo_Configure_Request.Size(m) -} -func (m *Configure_Request) XXX_DiscardUnknown() { - xxx_messageInfo_Configure_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Configure_Request proto.InternalMessageInfo - -func (m *Configure_Request) GetTerraformVersion() string { - if m != nil { - return m.TerraformVersion - } - return "" -} - -func (m *Configure_Request) GetConfig() *DynamicValue { - 
if m != nil { - return m.Config - } - return nil -} - -type Configure_Response struct { - Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Configure_Response) Reset() { *m = Configure_Response{} } -func (m *Configure_Response) String() string { return proto.CompactTextString(m) } -func (*Configure_Response) ProtoMessage() {} -func (*Configure_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{11, 1} -} - -func (m *Configure_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Configure_Response.Unmarshal(m, b) -} -func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) -} -func (m *Configure_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configure_Response.Merge(m, src) -} -func (m *Configure_Response) XXX_Size() int { - return xxx_messageInfo_Configure_Response.Size(m) -} -func (m *Configure_Response) XXX_DiscardUnknown() { - xxx_messageInfo_Configure_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Configure_Response proto.InternalMessageInfo - -func (m *Configure_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ReadResource struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadResource) Reset() { *m = ReadResource{} } -func (m *ReadResource) String() string { return proto.CompactTextString(m) } -func (*ReadResource) ProtoMessage() {} -func (*ReadResource) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{12} -} - -func (m *ReadResource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadResource.Unmarshal(m, b) -} -func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) -} -func (m *ReadResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResource.Merge(m, src) -} -func (m *ReadResource) XXX_Size() int { - return xxx_messageInfo_ReadResource.Size(m) -} -func (m *ReadResource) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResource.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResource proto.InternalMessageInfo - -type ReadResource_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` - Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } -func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } -func (*ReadResource_Request) ProtoMessage() {} -func (*ReadResource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{12, 0} -} - -func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) -} -func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) -} -func (m *ReadResource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResource_Request.Merge(m, src) -} -func (m *ReadResource_Request) XXX_Size() int { - return xxx_messageInfo_ReadResource_Request.Size(m) -} -func (m *ReadResource_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResource_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo - -func (m *ReadResource_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ReadResource_Request) GetCurrentState() *DynamicValue { - if m != nil { - return m.CurrentState - } - return nil -} - -func (m *ReadResource_Request) GetPrivate() []byte { - if m != nil { - return m.Private - } - return nil -} - -type ReadResource_Response struct { - NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } -func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } -func (*ReadResource_Response) ProtoMessage() {} -func (*ReadResource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{12, 1} -} - -func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) -} -func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) -} -func (m *ReadResource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResource_Response.Merge(m, src) -} -func (m *ReadResource_Response) XXX_Size() int { - return xxx_messageInfo_ReadResource_Response.Size(m) -} -func (m *ReadResource_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResource_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo - -func (m *ReadResource_Response) GetNewState() *DynamicValue { - if m != nil { - return m.NewState - } - return nil -} - -func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -func (m *ReadResource_Response) GetPrivate() []byte { - if m != nil { - return m.Private - } - return nil -} - -type PlanResourceChange struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } -func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } -func (*PlanResourceChange) ProtoMessage() {} -func (*PlanResourceChange) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{13} -} - -func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) -} -func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) -} -func (m *PlanResourceChange) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_PlanResourceChange.Merge(m, src) -} -func (m *PlanResourceChange) XXX_Size() int { - return xxx_messageInfo_PlanResourceChange.Size(m) -} -func (m *PlanResourceChange) XXX_DiscardUnknown() { - xxx_messageInfo_PlanResourceChange.DiscardUnknown(m) -} - -var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo - -type PlanResourceChange_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` - ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` - Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} } -func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } -func (*PlanResourceChange_Request) ProtoMessage() {} -func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{13, 0} -} - -func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) -} -func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) -} -func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlanResourceChange_Request.Merge(m, src) -} -func (m *PlanResourceChange_Request) XXX_Size() int { - return xxx_messageInfo_PlanResourceChange_Request.Size(m) -} -func (m *PlanResourceChange_Request) XXX_DiscardUnknown() { - xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo - -func (m *PlanResourceChange_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue { - if m != nil { - return m.PriorState - } - return nil -} - -func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { - if m != nil { - return m.ProposedNewState - } - return nil -} - -func (m *PlanResourceChange_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -func (m *PlanResourceChange_Request) GetPriorPrivate() []byte { - if m != nil { - return m.PriorPrivate - } - return nil -} - -type PlanResourceChange_Response struct { - PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` - RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` - PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - // This may be set only by the helper/schema "SDK" in the main Terraform - // repository, to request that Terraform Core >=0.12 permit additional 
- // inconsistencies that can result from the legacy SDK type system - // and its imprecise mapping to the >=0.12 type system. - // The change in behavior implied by this flag makes sense only for the - // specific details of the legacy SDK type system, and are not a general - // mechanism to avoid proper type handling in providers. - // - // ==== DO NOT USE THIS ==== - // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== - // ==== DO NOT USE THIS ==== - LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} } -func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) } -func (*PlanResourceChange_Response) ProtoMessage() {} -func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{13, 1} -} - -func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b) -} -func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic) -} -func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlanResourceChange_Response.Merge(m, src) -} -func (m *PlanResourceChange_Response) XXX_Size() int { - return xxx_messageInfo_PlanResourceChange_Response.Size(m) -} -func (m *PlanResourceChange_Response) XXX_DiscardUnknown() { - xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo - -func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue { - if m != nil { - return m.PlannedState - } - return nil -} - -func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { - if m != nil { - return m.RequiresReplace - } - return nil -} - -func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte { - if m != nil { - return m.PlannedPrivate - } - return nil -} - -func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool { - if m != nil { - return m.LegacyTypeSystem - } - return false -} - -type ApplyResourceChange struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} } -func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) } -func (*ApplyResourceChange) ProtoMessage() {} -func (*ApplyResourceChange) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{14} -} - -func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b) -} -func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic) -} -func (m *ApplyResourceChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResourceChange.Merge(m, src) -} -func (m *ApplyResourceChange) XXX_Size() int { - return xxx_messageInfo_ApplyResourceChange.Size(m) -} -func (m 
*ApplyResourceChange) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo - -type ApplyResourceChange_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` - PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` - Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} } -func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) } -func (*ApplyResourceChange_Request) ProtoMessage() {} -func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{14, 0} -} - -func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b) -} -func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic) -} -func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src) -} -func (m *ApplyResourceChange_Request) XXX_Size() int { - return xxx_messageInfo_ApplyResourceChange_Request.Size(m) -} -func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo - -func (m *ApplyResourceChange_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue { - if m != nil { - return m.PriorState - } - return nil -} - -func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { - if m != nil { - return m.PlannedState - } - return nil -} - -func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte { - if m != nil { - return m.PlannedPrivate - } - return nil -} - -type ApplyResourceChange_Response struct { - NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` - Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - // This may be set only by the helper/schema "SDK" in the main Terraform - // repository, to request that Terraform Core >=0.12 permit additional - // inconsistencies that can result from the legacy SDK type system - // and its imprecise mapping to the >=0.12 type system. - // The change in behavior implied by this flag makes sense only for the - // specific details of the legacy SDK type system, and are not a general - // mechanism to avoid proper type handling in providers. 
- // - // ==== DO NOT USE THIS ==== - // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== - // ==== DO NOT USE THIS ==== - LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} } -func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } -func (*ApplyResourceChange_Response) ProtoMessage() {} -func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{14, 1} -} - -func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) -} -func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) -} -func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src) -} -func (m *ApplyResourceChange_Response) XXX_Size() int { - return xxx_messageInfo_ApplyResourceChange_Response.Size(m) -} -func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo - -func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue { - if m != nil { - return m.NewState - } - return nil -} - -func (m *ApplyResourceChange_Response) GetPrivate() []byte { - if m != nil { - return m.Private - } - return nil -} - -func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { - if m != nil { - return m.LegacyTypeSystem - } - return false -} - -type ImportResourceState struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } -func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } -func (*ImportResourceState) ProtoMessage() {} -func (*ImportResourceState) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{15} -} - -func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) -} -func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) -} -func (m *ImportResourceState) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState.Merge(m, src) -} -func (m *ImportResourceState) XXX_Size() int { - return xxx_messageInfo_ImportResourceState.Size(m) -} -func (m *ImportResourceState) XXX_DiscardUnknown() { - xxx_messageInfo_ImportResourceState.DiscardUnknown(m) -} - -var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo - -type ImportResourceState_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache 
int32 `json:"-"` -} - -func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} } -func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } -func (*ImportResourceState_Request) ProtoMessage() {} -func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{15, 0} -} - -func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) -} -func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) -} -func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState_Request.Merge(m, src) -} -func (m *ImportResourceState_Request) XXX_Size() int { - return xxx_messageInfo_ImportResourceState_Request.Size(m) -} -func (m *ImportResourceState_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo - -func (m *ImportResourceState_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ImportResourceState_Request) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type ImportResourceState_ImportedResource struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} } -func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } -func (*ImportResourceState_ImportedResource) ProtoMessage() {} -func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{15, 1} -} - -func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) -} -func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) -} -func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src) -} -func (m *ImportResourceState_ImportedResource) XXX_Size() int { - return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) -} -func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() { - xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m) -} - -var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo - -func (m *ImportResourceState_ImportedResource) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue { - if m != nil { - return m.State - } - return nil -} - -func (m *ImportResourceState_ImportedResource) GetPrivate() []byte { - if m != nil { - return m.Private - } - return nil -} - -type 
ImportResourceState_Response struct { - ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} } -func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } -func (*ImportResourceState_Response) ProtoMessage() {} -func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{15, 2} -} - -func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) -} -func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) -} -func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState_Response.Merge(m, src) -} -func (m *ImportResourceState_Response) XXX_Size() int { - return xxx_messageInfo_ImportResourceState_Response.Size(m) -} -func (m *ImportResourceState_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo - -func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { - if m != nil { - return m.ImportedResources - } - return nil -} - -func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ReadDataSource struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } -func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } -func (*ReadDataSource) ProtoMessage() {} -func (*ReadDataSource) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{16} -} - -func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) -} -func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) -} -func (m *ReadDataSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource.Merge(m, src) -} -func (m *ReadDataSource) XXX_Size() int { - return xxx_messageInfo_ReadDataSource.Size(m) -} -func (m *ReadDataSource) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDataSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo - -type ReadDataSource_Request struct { - TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } -func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } -func (*ReadDataSource_Request) 
ProtoMessage() {} -func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{16, 0} -} - -func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) -} -func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) -} -func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource_Request.Merge(m, src) -} -func (m *ReadDataSource_Request) XXX_Size() int { - return xxx_messageInfo_ReadDataSource_Request.Size(m) -} -func (m *ReadDataSource_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo - -func (m *ReadDataSource_Request) GetTypeName() string { - if m != nil { - return m.TypeName - } - return "" -} - -func (m *ReadDataSource_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -type ReadDataSource_Response struct { - State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } -func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } -func (*ReadDataSource_Response) ProtoMessage() {} -func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{16, 1} -} - -func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) -} -func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) -} -func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource_Response.Merge(m, src) -} -func (m *ReadDataSource_Response) XXX_Size() int { - return xxx_messageInfo_ReadDataSource_Response.Size(m) -} -func (m *ReadDataSource_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo - -func (m *ReadDataSource_Response) GetState() *DynamicValue { - if m != nil { - return m.State - } - return nil -} - -func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type GetProvisionerSchema struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } -func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } -func (*GetProvisionerSchema) ProtoMessage() {} -func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{17} -} - -func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) -} -func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, 
deterministic) -} -func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema.Merge(m, src) -} -func (m *GetProvisionerSchema) XXX_Size() int { - return xxx_messageInfo_GetProvisionerSchema.Size(m) -} -func (m *GetProvisionerSchema) XXX_DiscardUnknown() { - xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo - -type GetProvisionerSchema_Request struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } -func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } -func (*GetProvisionerSchema_Request) ProtoMessage() {} -func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{17, 0} -} - -func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) -} -func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) -} -func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) -} -func (m *GetProvisionerSchema_Request) XXX_Size() int { - return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) -} -func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { - xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo - -type GetProvisionerSchema_Response struct { - Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } -func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } -func (*GetProvisionerSchema_Response) ProtoMessage() {} -func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{17, 1} -} - -func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) -} -func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) -} -func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) -} -func (m *GetProvisionerSchema_Response) XXX_Size() int { - return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) -} -func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { - xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo - -func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { - if m != nil { - return m.Provisioner - } - return nil -} - -func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } 
- return nil -} - -type ValidateProvisionerConfig struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } -func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } -func (*ValidateProvisionerConfig) ProtoMessage() {} -func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{18} -} - -func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) -} -func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) -} -func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) -} -func (m *ValidateProvisionerConfig) XXX_Size() int { - return xxx_messageInfo_ValidateProvisionerConfig.Size(m) -} -func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo - -type ValidateProvisionerConfig_Request struct { - Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } -func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } -func (*ValidateProvisionerConfig_Request) ProtoMessage() {} -func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{18, 0} -} - -func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) -} -func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) -} -func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) -} -func (m *ValidateProvisionerConfig_Request) XXX_Size() int { - return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) -} -func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo - -func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -type ValidateProvisionerConfig_Response struct { - Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } -func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } -func (*ValidateProvisionerConfig_Response) ProtoMessage() {} -func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { - return 
fileDescriptor_17ae6090ff270234, []int{18, 1} -} - -func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) -} -func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) -} -func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) -} -func (m *ValidateProvisionerConfig_Response) XXX_Size() int { - return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) -} -func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo - -func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -type ProvisionResource struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } -func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } -func (*ProvisionResource) ProtoMessage() {} -func (*ProvisionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{19} -} - -func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) -} -func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) -} -func (m *ProvisionResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource.Merge(m, src) -} -func (m *ProvisionResource) XXX_Size() int { - return xxx_messageInfo_ProvisionResource.Size(m) -} -func (m *ProvisionResource) XXX_DiscardUnknown() { - xxx_messageInfo_ProvisionResource.DiscardUnknown(m) -} - -var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo - -type ProvisionResource_Request struct { - Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } -func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } -func (*ProvisionResource_Request) ProtoMessage() {} -func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{19, 0} -} - -func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) -} -func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) -} -func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource_Request.Merge(m, src) -} -func (m *ProvisionResource_Request) XXX_Size() int { - return xxx_messageInfo_ProvisionResource_Request.Size(m) -} -func (m *ProvisionResource_Request) XXX_DiscardUnknown() { - 
xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo - -func (m *ProvisionResource_Request) GetConfig() *DynamicValue { - if m != nil { - return m.Config - } - return nil -} - -func (m *ProvisionResource_Request) GetConnection() *DynamicValue { - if m != nil { - return m.Connection - } - return nil -} - -type ProvisionResource_Response struct { - Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } -func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } -func (*ProvisionResource_Response) ProtoMessage() {} -func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_17ae6090ff270234, []int{19, 1} -} - -func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) -} -func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) -} -func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource_Response.Merge(m, src) -} -func (m *ProvisionResource_Response) XXX_Size() int { - return xxx_messageInfo_ProvisionResource_Response.Size(m) -} -func (m *ProvisionResource_Response) XXX_DiscardUnknown() { - xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo - -func (m *ProvisionResource_Response) GetOutput() string { - if m != nil { - return m.Output - } - return "" -} - -func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { - if m != nil { - return m.Diagnostics - } - return nil -} - -func init() { - proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) - proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) - proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") - proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") - proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") - proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") - proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") - proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") - proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") - proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") - proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") - proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") - proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") - proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") - proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") - proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") - proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") - proto.RegisterType((*GetProviderSchema_Response)(nil), 
"tfplugin5.GetProviderSchema.Response") - proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") - proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") - proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig") - proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") - proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") - proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") - proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") - proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") - proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") - proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") - proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") - proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") - proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") - proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") - proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") - proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") - proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") - proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") - proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") - proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") - proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") - proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") - proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") - proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") - proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") - proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") - proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") - proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") - proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") - proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") - proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") - proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") - proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") - proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") - proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") - proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") - proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") - 
proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request") - proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") - proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") - proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") - proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") -} - -func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) } - -var fileDescriptor_17ae6090ff270234 = []byte{ - // 1880 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x23, 0x49, - 0x19, 0x9f, 0xf6, 0x23, 0xb1, 0x3f, 0xe7, 0xe1, 0xd4, 0xcc, 0x0e, 0xa6, 0x77, 0x17, 0x82, 0x79, - 0x24, 0xab, 0xdd, 0xf1, 0xac, 0x32, 0xb0, 0xbb, 0x84, 0xd1, 0x8a, 0x6c, 0x26, 0x64, 0x22, 0x66, - 0xb2, 0xa1, 0x3c, 0x0f, 0x24, 0xa4, 0xb5, 0x6a, 0xdc, 0x15, 0x4f, 0x33, 0x76, 0x77, 0x6f, 0x75, - 0x39, 0x89, 0x85, 0xc4, 0x05, 0xc1, 0x19, 0x09, 0xf1, 0x90, 0x78, 0x5c, 0x40, 0xe2, 0x1f, 0xe0, - 0x00, 0xdc, 0x38, 0xf1, 0x0f, 0x70, 0x03, 0x4e, 0x08, 0x6e, 0x9c, 0xe1, 0x82, 0x84, 0xea, 0xd5, - 0x5d, 0xb6, 0xdb, 0x4e, 0x4f, 0xb2, 0x23, 0xc4, 0xad, 0xab, 0xbe, 0x5f, 0x7d, 0xdf, 0x57, 0xdf, - 0xab, 0xbe, 0xcf, 0x86, 0x55, 0x7e, 0x1c, 0xf5, 0x87, 0x3d, 0x3f, 0xf8, 0x42, 0x2b, 0x62, 0x21, - 0x0f, 0x51, 0x35, 0xd9, 0x68, 0xde, 0x86, 0xa5, 0x3b, 0xa3, 0x80, 0x0c, 0xfc, 0xee, 0x23, 0xd2, - 0x1f, 0x52, 0xd4, 0x80, 0xc5, 0x41, 0xdc, 0x8b, 0x48, 0xf7, 0x59, 0xc3, 0x59, 0x77, 0x36, 0x97, - 0xb0, 0x59, 0x22, 0x04, 0xa5, 0x6f, 0xc6, 0x61, 0xd0, 0x28, 0xc8, 0x6d, 0xf9, 0xdd, 0xfc, 0x9b, - 0x03, 0x70, 0xc7, 0x27, 0xbd, 0x20, 0x8c, 0xb9, 0xdf, 0x45, 0xdb, 0x50, 0x89, 0xe9, 0x09, 0x65, - 0x3e, 0x1f, 0xc9, 0xd3, 0x2b, 0x5b, 0x9f, 0x68, 0xa5, 0xb2, 0x53, 0x60, 0xab, 0xad, 0x51, 0x38, - 0xc1, 0x0b, 0xc1, 0xf1, 0x70, 0x30, 0x20, 0x6c, 0x24, 0x25, 0x54, 0xb1, 0x59, 0xa2, 0xeb, 0xb0, - 0xe0, 0x51, 0x4e, 0xfc, 0x7e, 0xa3, 0x28, 0x09, 0x7a, 0x85, 0xde, 0x82, 0x2a, 0xe1, 0x9c, 0xf9, - 0x4f, 0x86, 0x9c, 0x36, 0x4a, 0xeb, 0xce, 0x66, 0x6d, 0xab, 0x61, 0x89, 0xdb, 0x31, 0xb4, 0x23, - 0xc2, 0x9f, 0xe2, 0x14, 0xda, 0xbc, 0x09, 0x15, 0x23, 0x1f, 0xd5, 0x60, 0xf1, 0xe0, 0xf0, 0xd1, - 0xce, 0xbd, 0x83, 0x3b, 0xf5, 0x2b, 0xa8, 0x0a, 0xe5, 0x3d, 0x8c, 0xdf, 0xc7, 0x75, 0x47, 0xec, - 0x3f, 0xde, 0xc1, 0x87, 0x07, 0x87, 0xfb, 0xf5, 0x42, 0xf3, 0x2f, 0x0e, 0x2c, 0x8f, 0x71, 0x43, - 0xb7, 0xa0, 0x1c, 0x73, 0x1a, 0xc5, 0x0d, 0x67, 0xbd, 0xb8, 0x59, 0xdb, 0x7a, 0x75, 0x96, 0xd8, - 0x56, 0x9b, 0xd3, 0x08, 0x2b, 0xac, 0xfb, 0x43, 0x07, 0x4a, 0x62, 0x8d, 0x36, 0x60, 0x25, 0xd1, - 0xa6, 0x13, 0x90, 0x01, 0x95, 0xc6, 0xaa, 0xde, 0xbd, 0x82, 0x97, 0x93, 0xfd, 0x43, 0x32, 0xa0, - 0xa8, 0x05, 0x88, 0xf6, 0xe9, 0x80, 0x06, 0xbc, 0xf3, 0x8c, 0x8e, 0x3a, 0x31, 0x67, 0x7e, 0xd0, - 0x53, 0xe6, 0xb9, 0x7b, 0x05, 0xd7, 0x35, 0xed, 0xab, 0x74, 0xd4, 0x96, 0x14, 0xb4, 0x09, 0xab, - 0x36, 0xde, 0x0f, 0xb8, 0x34, 0x59, 0x51, 0x70, 0x4e, 0xc1, 0x07, 0x01, 0x7f, 0x0f, 0x84, 0xa7, - 0xfa, 0xb4, 0xcb, 0x43, 0xd6, 0xbc, 0x25, 0xd4, 0x0a, 0x23, 0xb7, 0x0a, 0x8b, 0x98, 0x7e, 0x38, - 0xa4, 0x31, 0x77, 0xd7, 0xa1, 0x82, 0x69, 0x1c, 0x85, 0x41, 0x4c, 0xd1, 0x35, 0x28, 0xef, 0x31, - 0x16, 0x32, 0xa5, 0x24, 0x56, 0x8b, 0xe6, 0x8f, 0x1c, 0xa8, 0x60, 0x72, 0xda, 0xe6, 0x84, 0xd3, - 0x24, 0x34, 0x9c, 0x34, 0x34, 0xd0, 0x36, 0x2c, 0x1e, 0xf7, 0x09, 0x1f, 0x90, 0xa8, 0x51, 0x90, - 0x46, 0x5a, 0xb7, 0x8c, 
0x64, 0x4e, 0xb6, 0xbe, 0xa2, 0x20, 0x7b, 0x01, 0x67, 0x23, 0x6c, 0x0e, - 0xb8, 0xdb, 0xb0, 0x64, 0x13, 0x50, 0x1d, 0x8a, 0xcf, 0xe8, 0x48, 0x2b, 0x20, 0x3e, 0x85, 0x52, - 0x27, 0x22, 0x5e, 0x75, 0xac, 0xa8, 0xc5, 0x76, 0xe1, 0x1d, 0xa7, 0xf9, 0x8f, 0x32, 0x2c, 0xb4, - 0xbb, 0x4f, 0xe9, 0x80, 0x88, 0x90, 0x3a, 0xa1, 0x2c, 0xf6, 0xb5, 0x66, 0x45, 0x6c, 0x96, 0xe8, - 0x06, 0x94, 0x9f, 0xf4, 0xc3, 0xee, 0x33, 0x79, 0xbc, 0xb6, 0xf5, 0x31, 0x4b, 0x35, 0x75, 0xb6, - 0xf5, 0x9e, 0x20, 0x63, 0x85, 0x72, 0x7f, 0xe1, 0x40, 0x59, 0x6e, 0xcc, 0x61, 0xf9, 0x25, 0x80, - 0xc4, 0x79, 0xb1, 0xbe, 0xf2, 0xcb, 0xd3, 0x7c, 0x93, 0xf0, 0xc0, 0x16, 0x1c, 0xbd, 0x0b, 0x35, - 0x29, 0xa9, 0xc3, 0x47, 0x11, 0x8d, 0x1b, 0xc5, 0xa9, 0xa8, 0xd2, 0xa7, 0x0f, 0x69, 0xcc, 0xa9, - 0xa7, 0x74, 0x03, 0x79, 0xe2, 0x81, 0x38, 0xe0, 0xfe, 0xd1, 0x81, 0x6a, 0xc2, 0x59, 0xb8, 0x23, - 0x8d, 0x2a, 0x2c, 0xbf, 0xc5, 0x9e, 0xe0, 0x6d, 0xb2, 0x57, 0x7c, 0xa3, 0x75, 0xa8, 0x79, 0x34, - 0xee, 0x32, 0x3f, 0xe2, 0xe2, 0x42, 0x2a, 0xbb, 0xec, 0x2d, 0xe4, 0x42, 0x85, 0xd1, 0x0f, 0x87, - 0x3e, 0xa3, 0x9e, 0xcc, 0xb0, 0x0a, 0x4e, 0xd6, 0x82, 0x16, 0x4a, 0x14, 0xe9, 0x37, 0xca, 0x8a, - 0x66, 0xd6, 0x82, 0xd6, 0x0d, 0x07, 0xd1, 0x90, 0x53, 0xaf, 0xb1, 0xa0, 0x68, 0x66, 0x8d, 0x5e, - 0x81, 0x6a, 0x4c, 0x83, 0xd8, 0xe7, 0xfe, 0x09, 0x6d, 0x2c, 0x4a, 0x62, 0xba, 0xe1, 0xfe, 0xba, - 0x00, 0x35, 0xeb, 0x96, 0xe8, 0x65, 0xa8, 0x0a, 0x5d, 0xad, 0x34, 0xc1, 0x15, 0xb1, 0x21, 0xf3, - 0xe3, 0xf9, 0xdc, 0x88, 0x76, 0x61, 0x31, 0xa0, 0x31, 0x17, 0x39, 0x54, 0x94, 0xd5, 0xe9, 0xb5, - 0xb9, 0x16, 0x96, 0xdf, 0x7e, 0xd0, 0xbb, 0x1f, 0x7a, 0x14, 0x9b, 0x93, 0x42, 0xa1, 0x81, 0x1f, - 0x74, 0x7c, 0x4e, 0x07, 0xb1, 0xb4, 0x49, 0x11, 0x57, 0x06, 0x7e, 0x70, 0x20, 0xd6, 0x92, 0x48, - 0xce, 0x34, 0xb1, 0xac, 0x89, 0xe4, 0x4c, 0x12, 0x9b, 0xf7, 0xd5, 0xcd, 0x34, 0xc7, 0xf1, 0xd2, - 0x03, 0xb0, 0xd0, 0x3e, 0x38, 0xdc, 0xbf, 0xb7, 0x57, 0x77, 0x50, 0x05, 0x4a, 0xf7, 0x0e, 0xda, - 0x0f, 0xea, 0x05, 0xb4, 0x08, 0xc5, 0xf6, 0xde, 0x83, 0x7a, 0x51, 0x7c, 0xdc, 0xdf, 0x39, 0xaa, - 0x97, 0x44, 0x89, 0xda, 0xc7, 0xef, 0x3f, 0x3c, 0xaa, 0x97, 0x9b, 0x3f, 0x29, 0xc1, 0xda, 0x3e, - 0xe5, 0x47, 0x2c, 0x3c, 0xf1, 0x3d, 0xca, 0x94, 0xfe, 0x76, 0x12, 0xff, 0xab, 0x68, 0x65, 0xf1, - 0x0d, 0xa8, 0x44, 0x1a, 0x29, 0xcd, 0x58, 0xdb, 0x5a, 0x9b, 0xba, 0x3c, 0x4e, 0x20, 0x88, 0x42, - 0x9d, 0xd1, 0x38, 0x1c, 0xb2, 0x2e, 0xed, 0xc4, 0x92, 0x68, 0x62, 0x7a, 0xdb, 0x3a, 0x36, 0x25, - 0xbe, 0x65, 0xe4, 0x89, 0x0f, 0x79, 0x5a, 0xed, 0xc7, 0x2a, 0xc1, 0x57, 0xd9, 0xf8, 0x2e, 0xea, - 0xc3, 0x55, 0x8f, 0x70, 0xd2, 0x99, 0x90, 0xa4, 0xe2, 0xff, 0x76, 0x3e, 0x49, 0x77, 0x08, 0x27, - 0xed, 0x69, 0x59, 0x6b, 0xde, 0xe4, 0x3e, 0x7a, 0x1b, 0x6a, 0x5e, 0xf2, 0x06, 0x09, 0xe7, 0x09, - 0x29, 0x2f, 0x65, 0xbe, 0x50, 0xd8, 0x46, 0xba, 0x0f, 0xe1, 0x5a, 0xd6, 0x7d, 0x32, 0xea, 0xd2, - 0x86, 0x5d, 0x97, 0x32, 0x6d, 0x9c, 0x96, 0x2a, 0xf7, 0x31, 0x5c, 0xcf, 0x56, 0xfe, 0x92, 0x8c, - 0x9b, 0x7f, 0x76, 0xe0, 0xa5, 0x23, 0x46, 0x23, 0xc2, 0xa8, 0xb1, 0xda, 0x6e, 0x18, 0x1c, 0xfb, - 0x3d, 0x77, 0x3b, 0x09, 0x0f, 0x74, 0x13, 0x16, 0xba, 0x72, 0x53, 0xc7, 0x83, 0x9d, 0x3d, 0x76, - 0x4b, 0x80, 0x35, 0xcc, 0xfd, 0xae, 0x63, 0xc5, 0xd3, 0x97, 0x61, 0x35, 0x52, 0x12, 0xbc, 0x4e, - 0x3e, 0x36, 0x2b, 0x06, 0xaf, 0x54, 0x99, 0xf4, 0x46, 0x21, 0xaf, 0x37, 0x9a, 0xdf, 0x2f, 0xc0, - 0xb5, 0x87, 0x51, 0x8f, 0x11, 0x8f, 0x26, 0x5e, 0x11, 0x8f, 0x89, 0xcb, 0xd2, 0xcb, 0xcd, 0x2d, - 0x1b, 0x56, 0x11, 0x2f, 0x8c, 0x17, 0xf1, 0x37, 0xa1, 0xca, 0xc8, 0x69, 0x27, 0x16, 0xec, 0x64, - 0x8d, 0xa8, 0x6d, 0x5d, 0xcd, 0x78, 0xb6, 0x70, 
0x85, 0xe9, 0x2f, 0xf7, 0x3b, 0xb6, 0x51, 0xde, - 0x85, 0x95, 0xa1, 0x52, 0xcc, 0xd3, 0x3c, 0xce, 0xb1, 0xc9, 0xb2, 0x81, 0xab, 0x77, 0xf4, 0xc2, - 0x26, 0xf9, 0xbd, 0x03, 0xee, 0x23, 0xd2, 0xf7, 0x3d, 0xa1, 0x9c, 0xb6, 0x89, 0x78, 0x19, 0xb4, - 0xd7, 0x1f, 0xe7, 0x34, 0x4c, 0x1a, 0x12, 0x85, 0x7c, 0x21, 0xb1, 0x6b, 0x5d, 0x7e, 0x42, 0x79, - 0x27, 0xb7, 0xf2, 0xbf, 0x75, 0xa0, 0x61, 0x94, 0x4f, 0xf3, 0xe1, 0xff, 0x42, 0xf5, 0xdf, 0x39, - 0x50, 0x55, 0x8a, 0x0e, 0x19, 0x75, 0x7b, 0xa9, 0xae, 0xaf, 0xc3, 0x1a, 0xa7, 0x8c, 0x91, 0xe3, - 0x90, 0x0d, 0x3a, 0x76, 0xc7, 0x50, 0xc5, 0xf5, 0x84, 0xf0, 0x48, 0x47, 0xdd, 0xff, 0x46, 0xf7, - 0x5f, 0x15, 0x60, 0x09, 0x53, 0xe2, 0x99, 0x78, 0x71, 0xbf, 0x9d, 0xd3, 0xd4, 0xb7, 0x61, 0xb9, - 0x3b, 0x64, 0x4c, 0x74, 0x99, 0x2a, 0xc8, 0xcf, 0xd1, 0x7a, 0x49, 0xa3, 0x55, 0x8c, 0x37, 0x60, - 0x31, 0x62, 0xfe, 0x89, 0x49, 0xb0, 0x25, 0x6c, 0x96, 0xee, 0x0f, 0xec, 0x54, 0xfa, 0x3c, 0x54, - 0x03, 0x7a, 0x9a, 0x2f, 0x8b, 0x2a, 0x01, 0x3d, 0xbd, 0x5c, 0x02, 0xcd, 0xd6, 0xaa, 0xf9, 0x9b, - 0x12, 0xa0, 0xa3, 0x3e, 0x09, 0x8c, 0x99, 0x76, 0x9f, 0x92, 0xa0, 0x47, 0xdd, 0xff, 0x38, 0x39, - 0xad, 0xf5, 0x0e, 0xd4, 0x22, 0xe6, 0x87, 0x2c, 0x9f, 0xad, 0x40, 0x62, 0xd5, 0x65, 0xf6, 0x00, - 0x45, 0x2c, 0x8c, 0xc2, 0x98, 0x7a, 0x9d, 0xd4, 0x16, 0xc5, 0xf9, 0x0c, 0xea, 0xe6, 0xc8, 0xa1, - 0xb1, 0x49, 0x1a, 0x5d, 0xa5, 0x5c, 0xd1, 0x85, 0x3e, 0x0d, 0xcb, 0x4a, 0x63, 0x63, 0x91, 0xb2, - 0xb4, 0xc8, 0x92, 0xdc, 0x3c, 0xd2, 0xce, 0xfa, 0x79, 0xc1, 0x72, 0xd6, 0x6d, 0x58, 0x8e, 0xfa, - 0x24, 0x08, 0xf2, 0x96, 0xbd, 0x25, 0x8d, 0x56, 0x0a, 0xee, 0x8a, 0x5e, 0x43, 0x36, 0x95, 0x71, - 0x87, 0xd1, 0xa8, 0x4f, 0xba, 0x54, 0x7b, 0x6e, 0xf6, 0x38, 0xb7, 0x6a, 0x4e, 0x60, 0x75, 0x00, - 0x6d, 0xc0, 0xaa, 0x51, 0x61, 0xdc, 0x91, 0x2b, 0x7a, 0x5b, 0x2b, 0x7e, 0xe1, 0x26, 0x00, 0xbd, - 0x01, 0xa8, 0x4f, 0x7b, 0xa4, 0x3b, 0x92, 0x4d, 0x7a, 0x27, 0x1e, 0xc5, 0x9c, 0x0e, 0x74, 0xe7, - 0x5b, 0x57, 0x14, 0x51, 0x72, 0xdb, 0x72, 0xbf, 0xf9, 0xa7, 0x22, 0x5c, 0xdd, 0x89, 0xa2, 0xfe, - 0x68, 0x22, 0x6e, 0xfe, 0xfd, 0xe2, 0xe3, 0x66, 0xca, 0x1b, 0xc5, 0xe7, 0xf1, 0xc6, 0x73, 0x87, - 0x4b, 0x86, 0xe5, 0xcb, 0x59, 0x96, 0x77, 0xff, 0x70, 0xf9, 0xfc, 0xb6, 0xd2, 0xb4, 0x30, 0x96, - 0xa6, 0x93, 0x6e, 0x2d, 0x5e, 0xd2, 0xad, 0xa5, 0x19, 0x6e, 0xfd, 0x67, 0x01, 0xae, 0x1e, 0x0c, - 0xa2, 0x90, 0xf1, 0xf1, 0xd6, 0xe3, 0xad, 0x9c, 0x5e, 0x5d, 0x81, 0x82, 0xef, 0xe9, 0xa1, 0xb5, - 0xe0, 0x7b, 0xee, 0x19, 0xd4, 0x15, 0x3b, 0x9a, 0xd4, 0xe1, 0x73, 0x47, 0x9e, 0x5c, 0x01, 0xa1, - 0x50, 0x73, 0xaa, 0xed, 0x2f, 0x6d, 0x6f, 0x7c, 0x00, 0xc8, 0xd7, 0x6a, 0x74, 0x4c, 0x8f, 0x6e, - 0xde, 0x92, 0x9b, 0x96, 0x88, 0x8c, 0xab, 0xb7, 0x26, 0xf5, 0xc7, 0x6b, 0xfe, 0xc4, 0x4e, 0x7c, - 0xf1, 0xc6, 0xe6, 0xaf, 0x0e, 0xac, 0x88, 0x47, 0x2a, 0xed, 0x0b, 0x5e, 0x5c, 0x47, 0xc0, 0xc6, - 0xc6, 0xa5, 0x72, 0xae, 0xd0, 0xd4, 0x66, 0xbe, 0xf0, 0xfd, 0x7e, 0xea, 0xc0, 0x35, 0x33, 0xdb, - 0x88, 0x5e, 0x20, 0x6b, 0x8e, 0x3b, 0xb3, 0xf4, 0xba, 0x25, 0xaa, 0x42, 0x82, 0x9d, 0x3d, 0xc9, - 0xd9, 0xa8, 0x8b, 0x6b, 0xf7, 0x33, 0x07, 0x3e, 0x6e, 0x3a, 0x33, 0x4b, 0xc5, 0x8f, 0x60, 0x96, - 0xf8, 0x48, 0x3a, 0x98, 0xbf, 0x3b, 0xb0, 0x96, 0xa8, 0x95, 0xb4, 0x31, 0xf1, 0xc5, 0xd5, 0x42, - 0x6f, 0x03, 0x74, 0xc3, 0x20, 0xa0, 0x5d, 0x6e, 0x86, 0x83, 0x79, 0x35, 0x37, 0x85, 0xba, 0xdf, - 0xb0, 0xee, 0x73, 0x1d, 0x16, 0xc2, 0x21, 0x8f, 0x86, 0x5c, 0x87, 0xa4, 0x5e, 0x5d, 0xd8, 0x0d, - 0x5b, 0x3f, 0xae, 0x42, 0xc5, 0xcc, 0x71, 0xe8, 0xeb, 0x50, 0xdd, 0xa7, 0x5c, 0xff, 0xc2, 0xf5, - 0x99, 0x73, 0x46, 0x64, 0x15, 0x40, 0x9f, 0xcd, 0x35, 0x48, 0xa3, 0xfe, 
0x8c, 0xa1, 0x11, 0x6d, - 0x5a, 0xe7, 0x33, 0x11, 0x89, 0xa4, 0xd7, 0x72, 0x20, 0xb5, 0xb4, 0x6f, 0xcd, 0x9b, 0x58, 0xd0, - 0x0d, 0x8b, 0xd1, 0x6c, 0x58, 0x22, 0xb7, 0x95, 0x17, 0xae, 0x85, 0x0f, 0x67, 0x4f, 0x1c, 0xe8, - 0xf5, 0x0c, 0x5e, 0x93, 0xa0, 0x44, 0xf0, 0x1b, 0xf9, 0xc0, 0x5a, 0xac, 0x9f, 0x3d, 0xb8, 0xa2, - 0x0d, 0x8b, 0x4b, 0x16, 0x20, 0x11, 0xb7, 0x79, 0x3e, 0x50, 0x8b, 0xba, 0x6b, 0x0d, 0x26, 0xe8, - 0x15, 0xeb, 0x58, 0xb2, 0x9b, 0x30, 0x7d, 0x75, 0x06, 0x55, 0x73, 0xfa, 0xda, 0xf8, 0x98, 0x80, - 0x3e, 0x69, 0x0f, 0xc4, 0x16, 0x21, 0xe1, 0xb7, 0x3e, 0x1b, 0xa0, 0x59, 0x76, 0xb3, 0x5a, 0x6a, - 0x64, 0x87, 0xe9, 0x34, 0x39, 0x61, 0xff, 0xb9, 0xf3, 0x60, 0x5a, 0xc8, 0x71, 0x66, 0x03, 0x86, - 0xec, 0xe3, 0x19, 0xf4, 0x44, 0xcc, 0xc6, 0xb9, 0xb8, 0x54, 0x4e, 0xc6, 0xb3, 0x38, 0x26, 0x27, - 0xeb, 0xd9, 0xcc, 0x92, 0x93, 0x8d, 0xd3, 0x72, 0x1e, 0x4f, 0xbe, 0x84, 0xe8, 0x53, 0x13, 0x86, - 0x4e, 0x49, 0x09, 0xf7, 0xe6, 0x3c, 0x88, 0x66, 0xfc, 0x45, 0xf5, 0xfb, 0x3f, 0x1a, 0xfb, 0xf9, - 0x94, 0x87, 0x51, 0xc2, 0xa4, 0x31, 0x4d, 0x50, 0x47, 0xb7, 0xbe, 0x57, 0x84, 0x9a, 0xf5, 0x30, - 0xa0, 0x0f, 0xec, 0xe2, 0xb4, 0x91, 0x51, 0x76, 0xec, 0x37, 0x2e, 0x33, 0xaa, 0x67, 0x00, 0xb5, - 0xaa, 0x67, 0x73, 0xde, 0x23, 0x94, 0x95, 0x8b, 0x53, 0xa8, 0x44, 0xe8, 0x8d, 0x9c, 0x68, 0x2d, - 0xf9, 0x49, 0xc6, 0x53, 0x33, 0x56, 0x7e, 0xa7, 0xa8, 0x99, 0xe5, 0x37, 0x0b, 0xa5, 0x24, 0xbc, - 0xe9, 0x5c, 0xc2, 0x11, 0x4f, 0x16, 0xe4, 0x1f, 0x7b, 0xb7, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, - 0x8a, 0x61, 0xfa, 0xcc, 0xeb, 0x1b, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ProviderClient is the client API for Provider service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
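All of the generated message types removed above share one accessor pattern: every getter guards against a nil receiver before dereferencing, so a possibly-nil response can be inspected without explicit checks. A minimal sketch of that usage, assuming it sits in the same package as these generated bindings (the function names are hypothetical, purely illustrative):

```go
// importedCount works even on a nil response: the generated
// GetImportedResources getter checks the receiver before dereferencing,
// and len of a nil slice is zero.
func importedCount(resp *ImportResourceState_Response) int {
	return len(resp.GetImportedResources())
}

// firstDiagnostics gathers diagnostics from either of two responses,
// again relying on the nil-safe getters generated above.
func firstDiagnostics(apply *ApplyResourceChange_Response, read *ReadDataSource_Response) []*Diagnostic {
	if d := apply.GetDiagnostics(); len(d) > 0 {
		return d
	}
	return read.GetDiagnostics()
}
```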
-type ProviderClient interface { - //////// Information about what a provider supports/expects - GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) - PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) - ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) - ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) - UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) - //////// One-time initialization, called before other functions below - Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) - //////// Managed Resource Lifecycle - ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) - PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) - ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) - ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) - ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) - //////// Graceful Shutdown - Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) -} - -type providerClient struct { - cc *grpc.ClientConn -} - -func NewProviderClient(cc *grpc.ClientConn) ProviderClient { - return &providerClient{cc} -} - -func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { - out := new(GetProviderSchema_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { - out := new(PrepareProviderConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { - out := new(ValidateResourceTypeConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { - out := new(ValidateDataSourceConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { - out := new(UpgradeResourceState_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { - out := new(Configure_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { - out := new(ReadResource_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { - out := new(PlanResourceChange_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { - out := new(ApplyResourceChange_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { - out := new(ImportResourceState_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { - out := new(ReadDataSource_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { - out := new(Stop_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ProviderServer is the server API for Provider service. 
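The generated ProviderClient above is a thin wrapper: each method performs a single unary Invoke against the corresponding /tfplugin5.Provider/ RPC. A rough sketch of direct use, assuming it lives in the same package as the bindings with context, log, and google.golang.org/grpc imported; the address, type name, and empty config are placeholders, since in practice Terraform's plugin machinery establishes the connection and encodes the config:

```go
func exampleReadDataSource(addr string) error {
	// Dial the plugin's gRPC endpoint; a real provider process is launched
	// and dialed by Terraform's plugin handshake, not by hand like this.
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	client := NewProviderClient(conn)

	// ReadDataSource_Request carries the data source type name and its
	// encoded config; both values here are placeholders.
	resp, err := client.ReadDataSource(context.Background(), &ReadDataSource_Request{
		TypeName: "example_data_source",
		Config:   &DynamicValue{},
	})
	if err != nil {
		return err
	}

	log.Printf("read returned %d diagnostics", len(resp.GetDiagnostics()))
	return nil
}
```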
-type ProviderServer interface { - //////// Information about what a provider supports/expects - GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) - PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) - ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) - ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) - UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) - //////// One-time initialization, called before other functions below - Configure(context.Context, *Configure_Request) (*Configure_Response, error) - //////// Managed Resource Lifecycle - ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) - PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) - ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) - ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) - ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) - //////// Graceful Shutdown - Stop(context.Context, *Stop_Request) (*Stop_Response, error) -} - -// UnimplementedProviderServer can be embedded to have forward compatible implementations. -type UnimplementedProviderServer struct { -} - -func (*UnimplementedProviderServer) GetSchema(ctx context.Context, req *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") -} -func (*UnimplementedProviderServer) PrepareProviderConfig(ctx context.Context, req *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") -} -func (*UnimplementedProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") -} -func (*UnimplementedProviderServer) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") -} -func (*UnimplementedProviderServer) UpgradeResourceState(ctx context.Context, req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") -} -func (*UnimplementedProviderServer) Configure(ctx context.Context, req *Configure_Request) (*Configure_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") -} -func (*UnimplementedProviderServer) ReadResource(ctx context.Context, req *ReadResource_Request) (*ReadResource_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") -} -func (*UnimplementedProviderServer) PlanResourceChange(ctx context.Context, req *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not 
implemented") -} -func (*UnimplementedProviderServer) ApplyResourceChange(ctx context.Context, req *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") -} -func (*UnimplementedProviderServer) ImportResourceState(ctx context.Context, req *ImportResourceState_Request) (*ImportResourceState_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") -} -func (*UnimplementedProviderServer) ReadDataSource(ctx context.Context, req *ReadDataSource_Request) (*ReadDataSource_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") -} -func (*UnimplementedProviderServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") -} - -func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { - s.RegisterService(&_Provider_serviceDesc, srv) -} - -func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetProviderSchema_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).GetSchema(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/GetSchema", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PrepareProviderConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).PrepareProviderConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateResourceTypeConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateDataSourceConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpgradeResourceState_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).UpgradeResourceState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/UpgradeResourceState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Configure_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).Configure(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/Configure", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadResource_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ReadResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ReadResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PlanResourceChange_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).PlanResourceChange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/PlanResourceChange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ApplyResourceChange_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ApplyResourceChange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ApplyResourceChange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ImportResourceState_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ImportResourceState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ImportResourceState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadDataSource_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).ReadDataSource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/ReadDataSource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Stop_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProviderServer).Stop(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provider/Stop", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) - } - return interceptor(ctx, in, info, handler) -} - -var _Provider_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tfplugin5.Provider", - HandlerType: (*ProviderServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSchema", - Handler: _Provider_GetSchema_Handler, - }, - { - MethodName: "PrepareProviderConfig", - Handler: _Provider_PrepareProviderConfig_Handler, - }, - { - MethodName: "ValidateResourceTypeConfig", - Handler: _Provider_ValidateResourceTypeConfig_Handler, - }, - { - MethodName: "ValidateDataSourceConfig", - Handler: _Provider_ValidateDataSourceConfig_Handler, - }, - { - MethodName: "UpgradeResourceState", - Handler: _Provider_UpgradeResourceState_Handler, - }, - { - MethodName: "Configure", - Handler: _Provider_Configure_Handler, - }, - { - MethodName: "ReadResource", - Handler: _Provider_ReadResource_Handler, - }, - { - MethodName: "PlanResourceChange", - Handler: _Provider_PlanResourceChange_Handler, - }, - { - MethodName: "ApplyResourceChange", - Handler: _Provider_ApplyResourceChange_Handler, - }, - { - MethodName: "ImportResourceState", - Handler: _Provider_ImportResourceState_Handler, - }, - { - MethodName: "ReadDataSource", - Handler: _Provider_ReadDataSource_Handler, - }, - { - MethodName: "Stop", - Handler: _Provider_Stop_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tfplugin5.proto", -} - -// ProvisionerClient is the client API for Provisioner service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ProvisionerClient interface { - GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) - ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) - ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) - Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) -} - -type provisionerClient struct { - cc *grpc.ClientConn -} - -func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { - return &provisionerClient{cc} -} - -func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { - out := new(GetProvisionerSchema_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { - out := new(ValidateProvisionerConfig_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { - stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) - if err != nil { - return nil, err - } - x := &provisionerProvisionResourceClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Provisioner_ProvisionResourceClient interface { - Recv() (*ProvisionResource_Response, error) - grpc.ClientStream -} - -type provisionerProvisionResourceClient struct { - grpc.ClientStream -} - -func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { - m := new(ProvisionResource_Response) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { - out := new(Stop_Response) - err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ProvisionerServer is the server API for Provisioner service. -type ProvisionerServer interface { - GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) - ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) - ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error - Stop(context.Context, *Stop_Request) (*Stop_Response, error) -} - -// UnimplementedProvisionerServer can be embedded to have forward compatible implementations. 
-type UnimplementedProvisionerServer struct { -} - -func (*UnimplementedProvisionerServer) GetSchema(ctx context.Context, req *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") -} -func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(ctx context.Context, req *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") -} -func (*UnimplementedProvisionerServer) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error { - return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") -} -func (*UnimplementedProvisionerServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") -} - -func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { - s.RegisterService(&_Provisioner_serviceDesc, srv) -} - -func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetProvisionerSchema_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProvisionerServer).GetSchema(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provisioner/GetSchema", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateProvisionerConfig_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ProvisionResource_Request) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) -} - -type Provisioner_ProvisionResourceServer interface { - Send(*ProvisionResource_Response) error - grpc.ServerStream -} - -type provisionerProvisionResourceServer struct { - grpc.ServerStream -} - -func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { - return x.ServerStream.SendMsg(m) -} - -func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Stop_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ProvisionerServer).Stop(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/tfplugin5.Provisioner/Stop", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) - } - return interceptor(ctx, in, info, handler) -} - -var _Provisioner_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tfplugin5.Provisioner", - HandlerType: (*ProvisionerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSchema", - Handler: _Provisioner_GetSchema_Handler, - }, - { - MethodName: "ValidateProvisionerConfig", - Handler: _Provisioner_ValidateProvisionerConfig_Handler, - }, - { - MethodName: "Stop", - Handler: _Provisioner_Stop_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ProvisionResource", - Handler: _Provisioner_ProvisionResource_Handler, - ServerStreams: true, - }, - }, - Metadata: "tfplugin5.proto", -} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go deleted file mode 100644 index 8f89909c..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package blocktoattr includes some helper functions that can perform -// preprocessing on a HCL body where a configschema.Block schema is available -// in order to allow list and set attributes defined in the schema to be -// optionally written by the user as block syntax. -package blocktoattr diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go deleted file mode 100644 index 0af708ec..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go +++ /dev/null @@ -1,187 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization -// functionality to allow attributes that are specified as having list or set -// type in the schema to be written with HCL block syntax as multiple nested -// blocks with the attribute name as the block type. -// -// This partially restores some of the block/attribute confusion from HCL 1 -// so that existing patterns that depended on that confusion can continue to -// be used in the short term while we settle on a longer-term strategy. -// -// Most of the fixup work is actually done when the returned body is -// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual -// decode of the body might not, if the content of the body is so ambiguous -// that there's no safe way to map it to the schema. -func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body { - // The schema should never be nil, but in practice it seems to be sometimes - // in the presence of poorly-configured test mocks, so we'll be robust - // by synthesizing an empty one. - if schema == nil { - schema = &configschema.Block{} - } - - return &fixupBody{ - original: body, - schema: schema, - names: ambiguousNames(schema), - } -} - -type fixupBody struct { - original hcl.Body - schema *configschema.Block - names map[string]struct{} -} - -// Content decodes content from the body. The given schema must be the lower-level -// representation of the same schema that was previously passed to FixUpBlockAttrs, -// or else the result is undefined. 
-func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { - schema = b.effectiveSchema(schema) - content, diags := b.original.Content(schema) - return b.fixupContent(content), diags -} - -func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { - schema = b.effectiveSchema(schema) - content, remain, diags := b.original.PartialContent(schema) - remain = &fixupBody{ - original: remain, - schema: b.schema, - names: b.names, - } - return b.fixupContent(content), remain, diags -} - -func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { - // FixUpBlockAttrs is not intended to be used in situations where we'd use - // JustAttributes, so we just pass this through verbatim to complete our - // implementation of hcl.Body. - return b.original.JustAttributes() -} - -func (b *fixupBody) MissingItemRange() hcl.Range { - return b.original.MissingItemRange() -} - -// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's -// content to determine whether the author has used attribute or block syntax -// for each of the ambigious attributes where both are permitted. -// -// The resulting schema will always contain all of the same names that are -// in the given schema, but some attribute schemas may instead be replaced by -// block header schemas. -func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema { - return effectiveSchema(given, b.original, b.names, true) -} - -func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent { - var ret hcl.BodyContent - ret.Attributes = make(hcl.Attributes) - for name, attr := range content.Attributes { - ret.Attributes[name] = attr - } - blockAttrVals := make(map[string][]*hcl.Block) - for _, block := range content.Blocks { - if _, exists := b.names[block.Type]; exists { - // If we get here then we've found a block type whose instances need - // to be re-interpreted as a list-of-objects attribute. We'll gather - // those up and fix them up below. - blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block) - continue - } - - // We need to now re-wrap our inner body so it will be subject to the - // same attribute-as-block fixup when recursively decoded. - retBlock := *block // shallow copy - if blockS, ok := b.schema.BlockTypes[block.Type]; ok { - // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then - retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block) - } - - ret.Blocks = append(ret.Blocks, &retBlock) - } - // No we'll install synthetic attributes for each of our fixups. We can't - // do this exactly because HCL's information model expects an attribute - // to be a single decl but we have multiple separate blocks. We'll - // approximate things, then, by using only our first block for the source - // location information. (We are guaranteed at least one by the above logic.) 
- for name, blocks := range blockAttrVals { - ret.Attributes[name] = &hcl.Attribute{ - Name: name, - Expr: &fixupBlocksExpr{ - blocks: blocks, - ety: b.schema.Attributes[name].Type.ElementType(), - }, - - Range: blocks[0].DefRange, - NameRange: blocks[0].TypeRange, - } - } - return &ret -} - -type fixupBlocksExpr struct { - blocks hcl.Blocks - ety cty.Type -} - -func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { - // In order to produce a suitable value for our expression we need to - // now decode the whole descendent block structure under each of our block - // bodies. - // - // That requires us to do something rather strange: we must construct a - // synthetic block type schema derived from the element type of the - // attribute, thus inverting our usual direction of lowering a schema - // into an implied type. Because a type is less detailed than a schema, - // the result is imprecise and in particular will just consider all - // the attributes to be optional and let the provider eventually decide - // whether to return errors if they turn out to be null when required. - schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety - spec := schema.DecoderSpec() - - vals := make([]cty.Value, len(e.blocks)) - var diags hcl.Diagnostics - for i, block := range e.blocks { - body := FixUpBlockAttrs(block.Body, schema) - val, blockDiags := hcldec.Decode(body, spec, ctx) - diags = append(diags, blockDiags...) - if val == cty.NilVal { - val = cty.UnknownVal(e.ety) - } - vals[i] = val - } - if len(vals) == 0 { - return cty.ListValEmpty(e.ety), diags - } - return cty.ListVal(vals), diags -} - -func (e *fixupBlocksExpr) Variables() []hcl.Traversal { - var ret []hcl.Traversal - schema := SchemaForCtyElementType(e.ety) - spec := schema.DecoderSpec() - for _, block := range e.blocks { - ret = append(ret, hcldec.Variables(block.Body, spec)...) - } - return ret -} - -func (e *fixupBlocksExpr) Range() hcl.Range { - // This is not really an appropriate range for the expression but it's - // the best we can do from here. - return e.blocks[0].DefRange -} - -func (e *fixupBlocksExpr) StartRange() hcl.Range { - return e.blocks[0].DefRange -} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go deleted file mode 100644 index 31e010cc..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go +++ /dev/null @@ -1,146 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func ambiguousNames(schema *configschema.Block) map[string]struct{} { - if schema == nil { - return nil - } - ambiguousNames := make(map[string]struct{}) - for name, attrS := range schema.Attributes { - aty := attrS.Type - if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { - ambiguousNames[name] = struct{}{} - } - } - return ambiguousNames -} - -func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { - ret := &hcl.BodySchema{} - - appearsAsBlock := make(map[string]struct{}) - { - // We'll construct some throwaway schemas here just to probe for - // whether each of our ambiguous names seems to be being used as - // an attribute or a block. 
We need to check both because in JSON - // syntax we rely on the schema to decide between attribute or block - // interpretation and so JSON will always answer yes to both of - // these questions and we want to prefer the attribute interpretation - // in that case. - var probeSchema hcl.BodySchema - - for name := range ambiguousNames { - probeSchema = hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: name, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - if _, exists := content.Attributes[name]; exists { - // Can decode as an attribute, so we'll go with that. - continue - } - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: name, - }, - }, - } - content, _, _ = body.PartialContent(&probeSchema) - if len(content.Blocks) > 0 || dynamicExpanded { - // A dynamic block with an empty iterator returns nothing. - // If there's no attribute and we have either a block or a - // dynamic expansion, we need to rewrite this one as a - // block for a successful result. - appearsAsBlock[name] = struct{}{} - } - } - if !dynamicExpanded { - // If we're deciding for a context where dynamic blocks haven't - // been expanded yet then we need to probe for those too. - probeSchema = hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "dynamic", - LabelNames: []string{"type"}, - }, - }, - } - content, _, _ := body.PartialContent(&probeSchema) - for _, block := range content.Blocks { - if _, exists := ambiguousNames[block.Labels[0]]; exists { - appearsAsBlock[block.Labels[0]] = struct{}{} - } - } - } - } - - for _, attrS := range given.Attributes { - if _, exists := appearsAsBlock[attrS.Name]; exists { - ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ - Type: attrS.Name, - }) - } else { - ret.Attributes = append(ret.Attributes, attrS) - } - } - - // Anything that is specified as a block type in the input schema remains - // that way by just passing through verbatim. - ret.Blocks = append(ret.Blocks, given.Blocks...) - - return ret -} - -// SchemaForCtyElementType converts a cty object type into an -// approximately-equivalent configschema.Block representing the element of -// a list or set. If the given type is not an object type then this -// function will panic. -func SchemaForCtyElementType(ty cty.Type) *configschema.Block { - atys := ty.AttributeTypes() - ret := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute, len(atys)), - } - for name, aty := range atys { - ret.Attributes[name] = &configschema.Attribute{ - Type: aty, - Optional: true, - } - } - return ret -} - -// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type -// into an approximately-equivalent configschema.NestedBlock. If the given type -// is not of the expected kind then this function will panic. -func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { - var nesting configschema.NestingMode - switch { - case ty.IsListType(): - nesting = configschema.NestingList - case ty.IsSetType(): - nesting = configschema.NestingSet - default: - panic("unsuitable type") - } - nested := SchemaForCtyElementType(ty.ElementType()) - return &configschema.NestedBlock{ - Nesting: nesting, - Block: *nested, - } -} - -// TypeCanBeBlocks returns true if the given type is a list-of-object or -// set-of-object type, and would thus be subject to the blocktoattr fixup -// if used as an attribute type. 
-func TypeCanBeBlocks(ty cty.Type) bool { - return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() -} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go deleted file mode 100644 index ae5c609d..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go +++ /dev/null @@ -1,45 +0,0 @@ -package blocktoattr - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/configs/configschema" -) - -// ExpandedVariables finds all of the global variables referenced in the -// given body with the given schema while taking into account the possibilities -// both of "dynamic" blocks being expanded and the possibility of certain -// attributes being written instead as nested blocks as allowed by the -// FixUpBlockAttrs function. -// -// This function exists to allow variables to be analyzed prior to dynamic -// block expansion while also dealing with the fact that dynamic block expansion -// might in turn produce nested blocks that are subject to FixUpBlockAttrs. -// -// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, -// which is itself a drop-in replacement for hcldec.Variables. -func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { - rootNode := dynblock.WalkVariables(body) - return walkVariables(rootNode, body, schema) -} - -func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { - givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) - ambiguousNames := ambiguousNames(schema) - effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) - vars, children := node.Visit(effectiveRawSchema) - - for _, child := range children { - if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { - vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) - } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { - // ☝️Check for collection type before element type, because if this is a mis-placed reference, - // a panic here will prevent other useful diags from being elevated to show the user what to fix - synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) - vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) - } - } - - return vars -} diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go deleted file mode 100644 index ebc008e3..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/data.go +++ /dev/null @@ -1,34 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Data is an interface whose implementations can provide cty.Value -// representations of objects identified by referenceable addresses from -// the addrs package. -// -// This interface will grow each time a new type of reference is added, and so -// implementations outside of the Terraform codebases are not advised. -// -// Each method returns a suitable value and optionally some diagnostics. 
If the -// returned diagnostics contains errors then the type of the returned value is -// used to construct an unknown value of the same type which is then used in -// place of the requested object so that type checking can still proceed. In -// cases where it's not possible to even determine a suitable result type, -// cty.DynamicVal is returned along with errors describing the problem. -type Data interface { - StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics - - GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) - GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/doc.go b/vendor/github.com/hashicorp/terraform/lang/doc.go deleted file mode 100644 index af5c5cac..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package lang deals with the runtime aspects of Terraform's configuration -// language, with concerns such as expression evaluation. It is closely related -// to sibling package "configs", which is responsible for configuration -// parsing and static validation. -package lang diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go deleted file mode 100644 index bfacd671..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/eval.go +++ /dev/null @@ -1,477 +0,0 @@ -package lang - -import ( - "fmt" - "log" - "strconv" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/ext/dynblock" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang/blocktoattr" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// ExpandBlock expands any "dynamic" blocks present in the given body. The -// result is a body with those blocks expanded, ready to be evaluated with -// EvalBlock. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - traversals := dynblock.ExpandVariablesHCLDec(body, spec) - refs, diags := References(traversals) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - - return dynblock.Expand(body, ctx), diags -} - -// EvalBlock evaluates the given body using the given block schema and returns -// a cty object value representing its contents. The type of the result conforms -// to the implied type of the given schema. -// -// This function does not automatically expand "dynamic" blocks within the -// body. 
If that is desired, first call the ExpandBlock method to obtain -// an expanded body to pass to this method. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. -func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - spec := schema.DecoderSpec() - - refs, diags := ReferencesInBlock(body, schema) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(schema.ImpliedType()), diags - } - - // HACK: In order to remain compatible with some assumptions made in - // Terraform v0.11 and earlier about the approximate equivalence of - // attribute vs. block syntax, we do a just-in-time fixup here to allow - // any attribute in the schema that has a list-of-objects or set-of-objects - // kind to potentially be populated instead by one or more nested blocks - // whose type is the attribute name. - body = blocktoattr.FixUpBlockAttrs(body, schema) - - val, evalDiags := hcldec.Decode(body, spec, ctx) - diags = diags.Append(evalDiags) - - return val, diags -} - -// EvalExpr evaluates a single expression in the receiving context and returns -// the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. -// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. -func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - refs, diags := ReferencesInExpr(expr) - - ctx, ctxDiags := s.EvalContext(refs) - diags = diags.Append(ctxDiags) - if diags.HasErrors() { - // We'll stop early if we found problems in the references, because - // it's likely evaluation will produce redundant copies of the same errors. - return cty.UnknownVal(wantType), diags - } - - val, evalDiags := expr.Value(ctx) - diags = diags.Append(evalDiags) - - if wantType != cty.DynamicPseudoType { - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: expr.Range().Ptr(), - }) - } - } - - return val, diags -} - -// EvalReference evaluates the given reference in the receiving scope and -// returns the resulting value. The value will be converted to the given type before -// it is returned if possible, or else an error diagnostic will be produced -// describing the conversion error. -// -// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion -// and just obtain the returned value directly. -// -// If the returned diagnostics contains errors then the result may be -// incomplete, but will always be of the requested type. 
-func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // We cheat a bit here and just build an EvalContext for our requested - // reference with the "self" address overridden, and then pull the "self" - // result out of it to return. - ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) - diags = diags.Append(ctxDiags) - val := ctx.Variables["self"] - if val == cty.NilVal { - val = cty.DynamicVal - } - - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - val = cty.UnknownVal(wantType) - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - - return val, diags -} - -// EvalContext constructs a HCL expression evaluation context whose variable -// scope contains sufficient values to satisfy the given set of references. -// -// Most callers should prefer to use the evaluation helper methods that -// this type offers, but this is here for less common situations where the -// caller will handle the evaluation calls itself. -func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { - return s.evalContext(refs, s.SelfAddr) -} - -func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { - if s == nil { - panic("attempt to construct EvalContext for nil Scope") - } - - var diags tfdiags.Diagnostics - vals := make(map[string]cty.Value) - funcs := s.Functions() - ctx := &hcl.EvalContext{ - Variables: vals, - Functions: funcs, - } - - if len(refs) == 0 { - // Easy path for common case where there are no references at all. - return ctx, diags - } - - // First we'll do static validation of the references. This catches things - // early that might otherwise not get caught due to unknown values being - // present in the scope during planning. - if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { - diags = diags.Append(staticDiags) - return ctx, diags - } - - // The reference set we are given has not been de-duped, and so there can - // be redundant requests in it for two reasons: - // - The same item is referenced multiple times - // - Both an item and that item's container are separately referenced. - // We will still visit every reference here and ask our data source for - // it, since that allows us to gather a full set of any errors and - // warnings, but once we've gathered all the data we'll then skip anything - // that's redundant in the process of populating our values map. 
- dataResources := map[string]map[string]cty.Value{} - managedResources := map[string]map[string]cty.Value{} - wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} - moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} - inputVariables := map[string]cty.Value{} - localValues := map[string]cty.Value{} - pathAttrs := map[string]cty.Value{} - terraformAttrs := map[string]cty.Value{} - countAttrs := map[string]cty.Value{} - forEachAttrs := map[string]cty.Value{} - var self cty.Value - - for _, ref := range refs { - rng := ref.SourceRange - - rawSubj := ref.Subject - if rawSubj == addrs.Self { - if selfAddr == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - continue - } - - if selfAddr == addrs.Self { - // Programming error: the self address cannot alias itself. - panic("scope SelfAddr attempting to alias itself") - } - - // self can only be used within a resource instance - subj := selfAddr.(addrs.ResourceInstance) - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj.ContainingResource(), rng)) - - diags = diags.Append(valDiags) - - // Self is an exception in that it must always resolve to a - // particular instance. We will still insert the full resource into - // the context below. - var hclDiags hcl.Diagnostics - // We should always have a valid self index by this point, but in - // the case of an error, self may end up as a cty.DynamicValue. - switch k := subj.Key.(type) { - case addrs.IntKey: - self, hclDiags = hcl.Index(val, cty.NumberIntVal(int64(k)), ref.SourceRange.ToHCL().Ptr()) - diags.Append(hclDiags) - case addrs.StringKey: - self, hclDiags = hcl.Index(val, cty.StringVal(string(k)), ref.SourceRange.ToHCL().Ptr()) - diags.Append(hclDiags) - default: - self = val - } - continue - } - - // This type switch must cover all of the "Referenceable" implementations - // in package addrs, however we are removing the possibility of - // ResourceInstance beforehand. 
- if addr, ok := rawSubj.(addrs.ResourceInstance); ok { - rawSubj = addr.ContainingResource() - } - - switch subj := rawSubj.(type) { - case addrs.Resource: - var into map[string]map[string]cty.Value - switch subj.Mode { - case addrs.ManagedResourceMode: - into = managedResources - case addrs.DataResourceMode: - into = dataResources - default: - panic(fmt.Errorf("unsupported ResourceMode %s", subj.Mode)) - } - - val, valDiags := normalizeRefValue(s.Data.GetResource(subj, rng)) - diags = diags.Append(valDiags) - - r := subj - if into[r.Type] == nil { - into[r.Type] = make(map[string]cty.Value) - } - into[r.Type][r.Name] = val - - case addrs.ModuleCallInstance: - val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) - diags = diags.Append(valDiags) - - if wholeModules[subj.Call.Name] == nil { - wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) - } - wholeModules[subj.Call.Name][subj.Key] = val - - case addrs.ModuleCallOutput: - val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) - diags = diags.Append(valDiags) - - callName := subj.Call.Call.Name - callKey := subj.Call.Key - if moduleOutputs[callName] == nil { - moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) - } - if moduleOutputs[callName][callKey] == nil { - moduleOutputs[callName][callKey] = make(map[string]cty.Value) - } - moduleOutputs[callName][callKey][subj.Name] = val - - case addrs.InputVariable: - val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) - diags = diags.Append(valDiags) - inputVariables[subj.Name] = val - - case addrs.LocalValue: - val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) - diags = diags.Append(valDiags) - localValues[subj.Name] = val - - case addrs.PathAttr: - val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) - diags = diags.Append(valDiags) - pathAttrs[subj.Name] = val - - case addrs.TerraformAttr: - val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) - diags = diags.Append(valDiags) - terraformAttrs[subj.Name] = val - - case addrs.CountAttr: - val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) - diags = diags.Append(valDiags) - countAttrs[subj.Name] = val - - case addrs.ForEachAttr: - val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng)) - diags = diags.Append(valDiags) - forEachAttrs[subj.Name] = val - - default: - // Should never happen - panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) - } - } - - for k, v := range buildResourceObjects(managedResources) { - vals[k] = v - } - vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) - vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs)) - vals["var"] = cty.ObjectVal(inputVariables) - vals["local"] = cty.ObjectVal(localValues) - vals["path"] = cty.ObjectVal(pathAttrs) - vals["terraform"] = cty.ObjectVal(terraformAttrs) - vals["count"] = cty.ObjectVal(countAttrs) - vals["each"] = cty.ObjectVal(forEachAttrs) - if self != cty.NilVal { - vals["self"] = self - } - - return ctx, diags -} - -func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { - vals := make(map[string]cty.Value) - for typeName, nameVals := range resources { - vals[typeName] = cty.ObjectVal(nameVals) - } - return vals -} - -func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value { - vals 
:= make(map[string]cty.Value) - - for name, keys := range wholeModules { - vals[name] = buildInstanceObjects(keys) - } - - for name, keys := range moduleOutputs { - if _, exists := wholeModules[name]; exists { - // If we also have a whole module value for this name then we'll - // skip this since the individual outputs are embedded in that result. - continue - } - - // The shape of this collection isn't compatible with buildInstanceObjects, - // but rather than replicating most of the buildInstanceObjects logic - // here we'll instead first transform the structure to be what that - // function expects and then use it. This is a little wasteful, but - // we do not expect this these maps to be large and so the extra work - // here should not hurt too much. - flattened := make(map[addrs.InstanceKey]cty.Value, len(keys)) - for k, vals := range keys { - flattened[k] = cty.ObjectVal(vals) - } - vals[name] = buildInstanceObjects(flattened) - } - - return vals -} - -func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value { - if val, exists := keys[addrs.NoKey]; exists { - // If present, a "no key" value supersedes all other values, - // since they should be embedded inside it. - return val - } - - // If we only have individual values then we need to construct - // either a list or a map, depending on what sort of keys we - // have. - haveInt := false - haveString := false - maxInt := 0 - - for k := range keys { - switch tk := k.(type) { - case addrs.IntKey: - haveInt = true - if int(tk) > maxInt { - maxInt = int(tk) - } - case addrs.StringKey: - haveString = true - } - } - - // We should either have ints or strings and not both, but - // if we have both then we'll prefer strings and let the - // language interpreter try to convert the int keys into - // strings in a map. - switch { - case haveString: - vals := make(map[string]cty.Value) - for k, v := range keys { - switch tk := k.(type) { - case addrs.StringKey: - vals[string(tk)] = v - case addrs.IntKey: - sk := strconv.Itoa(int(tk)) - vals[sk] = v - } - } - return cty.ObjectVal(vals) - case haveInt: - // We'll make a tuple that is long enough for our maximum - // index value. It doesn't matter if we end up shorter than - // the number of instances because if length(...) were - // being evaluated we would've got a NoKey reference and - // thus not ended up in this codepath at all. - vals := make([]cty.Value, maxInt+1) - for i := range vals { - if v, exists := keys[addrs.IntKey(i)]; exists { - vals[i] = v - } else { - // Just a placeholder, since nothing will access this anyway - vals[i] = cty.DynamicVal - } - } - return cty.TupleVal(vals) - default: - // Should never happen because there are no other key types. - log.Printf("[ERROR] strange makeInstanceObjects call with no supported key types") - return cty.EmptyObjectVal - } -} - -func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { - if diags.HasErrors() { - // If there are errors then we will force an unknown result so that - // we can still evaluate and catch type errors but we'll avoid - // producing redundant re-statements of the same errors we've already - // dealt with here. 
- return cty.UnknownVal(val.Type()), diags - } - return val, diags -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go deleted file mode 100644 index 8c075148..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go +++ /dev/null @@ -1,218 +0,0 @@ -package funcs - -import ( - "fmt" - "net" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -// CidrHostFunc contructs a function that calculates a full host IP address -// within a given IP network address prefix. -var CidrHostFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - { - Name: "hostnum", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var hostNum int - if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { - return cty.UnknownVal(cty.String), err - } - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - ip, err := cidr.Host(network, hostNum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(ip.String()), nil - }, -}) - -// CidrNetmaskFunc contructs a function that converts an IPv4 address prefix given -// in CIDR notation into a subnet mask address. -var CidrNetmaskFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - return cty.StringVal(net.IP(network.Mask).String()), nil - }, -}) - -// CidrSubnetFunc contructs a function that calculates a subnet address within -// a given IP network address prefix. -var CidrSubnetFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - { - Name: "newbits", - Type: cty.Number, - }, - { - Name: "netnum", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var newbits int - if err := gocty.FromCtyValue(args[1], &newbits); err != nil { - return cty.UnknownVal(cty.String), err - } - var netnum int - if err := gocty.FromCtyValue(args[2], &netnum); err != nil { - return cty.UnknownVal(cty.String), err - } - - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) - } - - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.) 
- if newbits > 32 { - return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") - } - - newNetwork, err := cidr.Subnet(network, newbits, netnum) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(newNetwork.String()), nil - }, -}) - -// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive -// subnet addresses at once, rather than just a single subnet extension. -var CidrSubnetsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "prefix", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "newbits", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - _, network, err := net.ParseCIDR(args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) - } - startPrefixLen, _ := network.Mask.Size() - - prefixLengthArgs := args[1:] - if len(prefixLengthArgs) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - var firstLength int - if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(1, err) - } - firstLength += startPrefixLen - - retVals := make([]cty.Value, len(prefixLengthArgs)) - - current, _ := cidr.PreviousSubnet(network, firstLength) - for i, lengthArg := range prefixLengthArgs { - var length int - if err := gocty.FromCtyValue(lengthArg, &length); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) - } - - if length < 1 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") - } - // For portability with 32-bit systems where the subnet number - // will be a 32-bit int, we only allow extension of 32 bits in - // one call even if we're running on a 64-bit machine. - // (Of course, this is significant only for IPv6.) - if length > 32 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") - } - length += startPrefixLen - if length > (len(network.IP) * 8) { - protocol := "IP" - switch len(network.IP) * 8 { - case 32: - protocol = "IPv4" - case 128: - protocol = "IPv6" - } - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) - } - - next, rollover := cidr.NextSubnet(current, length) - if rollover || !network.Contains(next.IP) { - // If we run out of suffix bits in the base CIDR prefix then - // NextSubnet will start incrementing the prefix bits, which - // we don't allow because it would then allocate addresses - // outside of the caller's given prefix. - return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) - } - - current = next - retVals[i] = cty.StringVal(current.String()) - } - - return cty.ListVal(retVals), nil - }, -}) - -// CidrHost calculates a full host IP address within a given IP network address prefix. -func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { - return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) -} - -// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. 
-func CidrNetmask(prefix cty.Value) (cty.Value, error) { - return CidrNetmaskFunc.Call([]cty.Value{prefix}) -} - -// CidrSubnet calculates a subnet address within a given IP network address prefix. -func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { - return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) -} - -// CidrSubnets calculates a sequence of consecutive subnet prefixes that may -// be of different prefix lengths under a common base prefix. -func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(newbits)+1) - args[0] = prefix - copy(args[1:], newbits) - return CidrSubnetsFunc.Call(args) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go deleted file mode 100644 index 2ea41687..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go +++ /dev/null @@ -1,1522 +0,0 @@ -package funcs - -import ( - "errors" - "fmt" - "sort" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" - "github.com/zclconf/go-cty/cty/gocty" -) - -var ElementFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "index", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - list := args[0] - listTy := list.Type() - switch { - case listTy.IsListType(): - return listTy.ElementType(), nil - case listTy.IsTupleType(): - if !args[1].IsKnown() { - // If the index isn't known yet then we can't predict the - // result type since each tuple element can have its own type. - return cty.DynamicPseudoType, nil - } - - etys := listTy.TupleElementTypes() - var index int - err := gocty.FromCtyValue(args[1], &index) - if err != nil { - // e.g. fractional number where whole number is required - return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) - } - if len(etys) == 0 { - return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list") - } - index = index % len(etys) - return etys[index], nil - default: - return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - var index int - err := gocty.FromCtyValue(args[1], &index) - if err != nil { - // can't happen because we checked this in the Type function above - return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) - } - - if !args[0].IsKnown() { - return cty.UnknownVal(retType), nil - } - - l := args[0].LengthInt() - if l == 0 { - return cty.DynamicVal, errors.New("cannot use element function with an empty list") - } - index = index % l - - // We did all the necessary type checks in the type function above, - // so this is guaranteed not to fail. 
- return args[0].Index(cty.NumberIntVal(int64(index))), nil - }, -}) - -var LengthFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - collTy := args[0].Type() - switch { - case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: - return cty.Number, nil - default: - return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - coll := args[0] - collTy := args[0].Type() - switch { - case collTy == cty.DynamicPseudoType: - return cty.UnknownVal(cty.Number), nil - case collTy.IsTupleType(): - l := len(collTy.TupleElementTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy.IsObjectType(): - l := len(collTy.AttributeTypes()) - return cty.NumberIntVal(int64(l)), nil - case collTy == cty.String: - // We'll delegate to the cty stdlib strlen function here, because - // it deals with all of the complexities of tokenizing unicode - // grapheme clusters. - return stdlib.Strlen(coll) - case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): - return coll.Length(), nil - default: - // Should never happen, because of the checks in our Type func above - return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") - } - }, -}) - -// CoalesceFunc constructs a function that takes any number of arguments and -// returns the first one that isn't empty. This function was copied from go-cty -// stdlib and modified so that it returns the first *non-empty* non-null element -// from a sequence, instead of merely the first non-null. -var CoalesceFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - argTypes := make([]cty.Type, len(args)) - for i, val := range args { - argTypes[i] = val.Type() - } - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - return retType, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, argVal := range args { - // We already know this will succeed because of the checks in our Type func above - argVal, _ = convert.Convert(argVal, retType) - if !argVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - if argVal.IsNull() { - continue - } - if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { - continue - } - - return argVal, nil - } - return cty.NilVal, errors.New("no non-null, non-empty-string arguments") - }, -}) - -// CoalesceListFunc constructs a function that takes any number of list arguments -// and returns the first one that isn't empty. 
-var CoalesceListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - // if any argument is unknown, we can't be certain know which type we will return - if !arg.IsKnown() { - return cty.DynamicPseudoType, nil - } - ty := arg.Type() - - if !ty.IsListType() && !ty.IsTupleType() { - return cty.NilType, errors.New("coalescelist arguments must be lists or tuples") - } - - argTypes[i] = arg.Type() - } - - last := argTypes[0] - // If there are mixed types, we have to return a dynamic type. - for _, next := range argTypes[1:] { - if !next.Equals(last) { - return cty.DynamicPseudoType, nil - } - } - - return last, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsKnown() { - // If we run into an unknown list at some point, we can't - // predict the final result yet. (If there's a known, non-empty - // arg before this then we won't get here.) - return cty.UnknownVal(retType), nil - } - - if arg.LengthInt() > 0 { - return arg, nil - } - } - - return cty.NilVal, errors.New("no non-null arguments") - }, -}) - -// CompactFunc constructs a function that takes a list of strings and returns a new list -// with any empty string elements removed. -var CompactFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.String), - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - if !listVal.IsWhollyKnown() { - // If some of the element values aren't known yet then we - // can't yet return a compacted list - return cty.UnknownVal(retType), nil - } - - var outputList []cty.Value - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - if v.IsNull() || v.AsString() == "" { - continue - } - outputList = append(outputList, v) - } - - if len(outputList) == 0 { - return cty.ListValEmpty(cty.String), nil - } - - return cty.ListVal(outputList), nil - }, -}) - -// ContainsFunc constructs a function that determines whether a given list or -// set contains a given single value as one of its elements. -var ContainsFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - arg := args[0] - ty := arg.Type() - - if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() { - return cty.NilVal, errors.New("argument must be list, tuple, or set") - } - - _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1]) - if err != nil { - return cty.False, nil - } - - return cty.True, nil - }, -}) - -// IndexFunc constructs a function that finds the element index for a given value in a list. 
-var IndexFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { - return cty.NilVal, errors.New("argument must be a list or tuple") - } - - if !args[0].IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - - if args[0].LengthInt() == 0 { // Easy path - return cty.NilVal, errors.New("cannot search an empty list") - } - - for it := args[0].ElementIterator(); it.Next(); { - i, v := it.Element() - eq, err := stdlib.Equal(v, args[1]) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - if eq.True() { - return i, nil - } - } - return cty.NilVal, errors.New("item not found") - - }, -}) - -// DistinctFunc constructs a function that takes a list and returns a new list -// with any duplicate elements removed. -var DistinctFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - - if !listVal.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - var list []cty.Value - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - list, err = appendIfMissing(list, v) - if err != nil { - return cty.NilVal, err - } - } - - if len(list) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(list), nil - }, -}) - -// ChunklistFunc constructs a function that splits a single list into fixed-size chunks, -// returning a list of lists. -var ChunklistFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "size", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - return cty.List(args[0].Type()), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - listVal := args[0] - if !listVal.IsKnown() { - return cty.UnknownVal(retType), nil - } - - if listVal.LengthInt() == 0 { - return cty.ListValEmpty(listVal.Type()), nil - } - - var size int - err = gocty.FromCtyValue(args[1], &size) - if err != nil { - return cty.NilVal, fmt.Errorf("invalid index: %s", err) - } - - if size < 0 { - return cty.NilVal, errors.New("the size argument must be positive") - } - - output := make([]cty.Value, 0) - - // if size is 0, returns a list made of the initial list - if size == 0 { - output = append(output, listVal) - return cty.ListVal(output), nil - } - - chunk := make([]cty.Value, 0) - - l := args[0].LengthInt() - i := 0 - - for it := listVal.ElementIterator(); it.Next(); { - _, v := it.Element() - chunk = append(chunk, v) - - // Chunk when index isn't 0, or when reaching the values's length - if (i+1)%size == 0 || (i+1) == l { - output = append(output, cty.ListVal(chunk)) - chunk = make([]cty.Value, 0) - } - i++ - } - - return cty.ListVal(output), nil - }, -}) - -// FlattenFunc constructs a function that takes a list and replaces any elements -// that are lists with a flattened sequence of the list contents. 
-var FlattenFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsWhollyKnown() { - return cty.DynamicPseudoType, nil - } - - argTy := args[0].Type() - if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() { - return cty.NilType, errors.New("can only flatten lists, sets and tuples") - } - - retVal, known := flattener(args[0]) - if !known { - return cty.DynamicPseudoType, nil - } - - tys := make([]cty.Type, len(retVal)) - for i, ty := range retVal { - tys[i] = ty.Type() - } - return cty.Tuple(tys), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] - if inputList.LengthInt() == 0 { - return cty.EmptyTupleVal, nil - } - - out, known := flattener(inputList) - if !known { - return cty.UnknownVal(retType), nil - } - - return cty.TupleVal(out), nil - }, -}) - -// Flatten until it's not a cty.List, and return whether the value is known. -// We can flatten lists with unknown values, as long as they are not -// lists themselves. -func flattener(flattenList cty.Value) ([]cty.Value, bool) { - out := make([]cty.Value, 0) - for it := flattenList.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { - if !val.IsKnown() { - return out, false - } - - res, known := flattener(val) - if !known { - return res, known - } - out = append(out, res...) - } else { - out = append(out, val) - } - } - return out, true -} - -// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys. -var KeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty := args[0].Type() - switch { - case ty.IsMapType(): - return cty.List(cty.String), nil - case ty.IsObjectType(): - atys := ty.AttributeTypes() - if len(atys) == 0 { - return cty.EmptyTuple, nil - } - // All of our result elements will be strings, and atys just - // decides how many there are. - etys := make([]cty.Type, len(atys)) - for i := range etys { - etys[i] = cty.String - } - return cty.Tuple(etys), nil - default: - return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - m := args[0] - var keys []cty.Value - - switch { - case m.Type().IsObjectType(): - // In this case we allow unknown values so we must work only with - // the attribute _types_, not with the value itself. - var names []string - for name := range m.Type().AttributeTypes() { - names = append(names, name) - } - sort.Strings(names) // same ordering guaranteed by cty's ElementIterator - if len(names) == 0 { - return cty.EmptyTupleVal, nil - } - keys = make([]cty.Value, len(names)) - for i, name := range names { - keys[i] = cty.StringVal(name) - } - return cty.TupleVal(keys), nil - default: - if !m.IsKnown() { - return cty.UnknownVal(retType), nil - } - - // cty guarantees that ElementIterator will iterate in lexicographical - // order by key. 
- for it := args[0].ElementIterator(); it.Next(); { - k, _ := it.Element() - keys = append(keys, k) - } - if len(keys) == 0 { - return cty.ListValEmpty(cty.String), nil - } - return cty.ListVal(keys), nil - } - }, -}) - -// ListFunc constructs a function that takes an arbitrary number of arguments -// and returns a list containing those values in the same order. -// -// This function is deprecated in Terraform v0.12 -var ListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - argTypes[i] = arg.Type() - } - - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.List(retType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newList := make([]cty.Value, 0, len(args)) - - for _, arg := range args { - // We already know this will succeed because of the checks in our Type func above - arg, _ = convert.Convert(arg, retType.ElementType()) - newList = append(newList, arg) - } - - return cty.ListVal(newList), nil - }, -}) - -// LookupFunc constructs a function that performs dynamic lookups of map types. -var LookupFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "inputMap", - Type: cty.DynamicPseudoType, - }, - { - Name: "key", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "default", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 1 || len(args) > 3 { - return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) - } - - ty := args[0].Type() - - switch { - case ty.IsObjectType(): - if !args[1].IsKnown() { - return cty.DynamicPseudoType, nil - } - - key := args[1].AsString() - if ty.HasAttribute(key) { - return args[0].GetAttr(key).Type(), nil - } else if len(args) == 3 { - // if the key isn't found but a default is provided, - // return the default type - return args[2].Type(), nil - } - return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) - case ty.IsMapType(): - if len(args) == 3 { - _, err = convert.Convert(args[2], ty.ElementType()) - if err != nil { - return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") - } - } - return ty.ElementType(), nil - default: - return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var defaultVal cty.Value - defaultValueSet := false - - if len(args) == 3 { - defaultVal = args[2] - defaultValueSet = true - } - - mapVar := args[0] - lookupKey := args[1].AsString() - - if !mapVar.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - if mapVar.Type().IsObjectType() { - if mapVar.Type().HasAttribute(lookupKey) { - return mapVar.GetAttr(lookupKey), nil - } - } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { - return mapVar.Index(cty.StringVal(lookupKey)), nil - 
} - - if defaultValueSet { - defaultVal, err = convert.Convert(defaultVal, retType) - if err != nil { - return cty.NilVal, err - } - return defaultVal, nil - } - - return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( - "lookup failed to find '%s'", lookupKey) - }, -}) - -// MapFunc constructs a function that takes an even number of arguments and -// returns a map whose elements are constructed from consecutive pairs of arguments. -// -// This function is deprecated in Terraform v0.12 -var MapFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 2 || len(args)%2 != 0 { - return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) - } - - argTypes := make([]cty.Type, len(args)/2) - index := 0 - - for i := 0; i < len(args); i += 2 { - argTypes[index] = args[i+1].Type() - index++ - } - - valType, _ := convert.UnifyUnsafe(argTypes) - if valType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.Map(valType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - } - - outputMap := make(map[string]cty.Value) - - for i := 0; i < len(args); i += 2 { - - key := args[i].AsString() - - err := gocty.FromCtyValue(args[i], &key) - if err != nil { - return cty.NilVal, err - } - - val := args[i+1] - - var variable cty.Value - err = gocty.FromCtyValue(val, &variable) - if err != nil { - return cty.NilVal, err - } - - // We already know this will succeed because of the checks in our Type func above - variable, _ = convert.Convert(variable, retType.ElementType()) - - // Check for duplicate keys - if _, ok := outputMap[key]; ok { - return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) - } - outputMap[key] = variable - } - - return cty.MapVal(outputMap), nil - }, -}) - -// MatchkeysFunc constructs a function that constructs a new list by taking a -// subset of elements from one list whose indexes match the corresponding -// indexes of values in another list. -var MatchkeysFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "keys", - Type: cty.List(cty.DynamicPseudoType), - }, - { - Name: "searchset", - Type: cty.List(cty.DynamicPseudoType), - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - if ty == cty.NilType { - return cty.NilType, errors.New("keys and searchset must be of the same type") - } - - // the return type is based on args[0] (values) - return args[0].Type(), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !args[0].IsKnown() { - return cty.UnknownVal(cty.List(retType.ElementType())), nil - } - - if args[0].LengthInt() != args[1].LengthInt() { - return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") - } - - output := make([]cty.Value, 0) - values := args[0] - - // Keys and searchset must be the same type. 
- // We can skip error checking here because we've already verified that - // they can be unified in the Type function - ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) - keys, _ := convert.Convert(args[1], ty) - searchset, _ := convert.Convert(args[2], ty) - - // if searchset is empty, return an empty list. - if searchset.LengthInt() == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - - if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, key := it.Element() - for iter := searchset.ElementIterator(); iter.Next(); { - _, search := iter.Element() - eq, err := stdlib.Equal(key, search) - if err != nil { - return cty.NilVal, err - } - if !eq.IsKnown() { - return cty.ListValEmpty(retType.ElementType()), nil - } - if eq.True() { - v := values.Index(cty.NumberIntVal(int64(i))) - output = append(output, v) - break - } - } - i++ - } - - // if we haven't matched any key, then output is an empty list. - if len(output) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(output), nil - }, -}) - -// MergeFunc constructs a function that takes an arbitrary number of maps and -// returns a single map that contains a merged set of elements from all of the maps. -// -// If more than one given map defines the same key then the one that is later in -// the argument sequence takes precedence. -var MergeFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "maps", - Type: cty.DynamicPseudoType, - AllowDynamicType: true, - }, - Type: function.StaticReturnType(cty.DynamicPseudoType), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - outputMap := make(map[string]cty.Value) - - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - if !arg.Type().IsObjectType() && !arg.Type().IsMapType() { - return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName()) - } - for it := arg.ElementIterator(); it.Next(); { - k, v := it.Element() - outputMap[k.AsString()] = v - } - } - return cty.ObjectVal(outputMap), nil - }, -}) - -// ReverseFunc takes a sequence and produces a new sequence of the same length -// with all of the same elements as the given sequence but in reverse order. 
-var ReverseFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - argTy := args[0].Type() - switch { - case argTy.IsTupleType(): - argTys := argTy.TupleElementTypes() - retTys := make([]cty.Type, len(argTys)) - for i, ty := range argTys { - retTys[len(retTys)-i-1] = ty - } - return cty.Tuple(retTys), nil - case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list - return cty.List(argTy.ElementType()), nil - default: - return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - in := args[0].AsValueSlice() - outVals := make([]cty.Value, len(in)) - for i, v := range in { - outVals[len(outVals)-i-1] = v - } - switch { - case retType.IsTupleType(): - return cty.TupleVal(outVals), nil - default: - if len(outVals) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(outVals), nil - } - }, -}) - -// SetProductFunc calculates the Cartesian product of two or more sets or -// sequences. If the arguments are all lists then the result is a list of tuples, -// preserving the ordering of all of the input lists. Otherwise the result is a -// set of tuples. -var SetProductFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "sets", - Type: cty.DynamicPseudoType, - }, - Type: func(args []cty.Value) (retType cty.Type, err error) { - if len(args) < 2 { - return cty.NilType, errors.New("at least two arguments are required") - } - - listCount := 0 - elemTys := make([]cty.Type, len(args)) - for i, arg := range args { - aty := arg.Type() - switch { - case aty.IsSetType(): - elemTys[i] = aty.ElementType() - case aty.IsListType(): - elemTys[i] = aty.ElementType() - listCount++ - case aty.IsTupleType(): - // We can accept a tuple type only if there's some common type - // that all of its elements can be converted to. - allEtys := aty.TupleElementTypes() - if len(allEtys) == 0 { - elemTys[i] = cty.DynamicPseudoType - listCount++ - break - } - ety, _ := convert.UnifyUnsafe(allEtys) - if ety == cty.NilType { - return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type") - } - elemTys[i] = ety - listCount++ - default: - return cty.NilType, function.NewArgErrorf(i, "a set or a list is required") - } - } - - if listCount == len(args) { - return cty.List(cty.Tuple(elemTys)), nil - } - return cty.Set(cty.Tuple(elemTys)), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - ety := retType.ElementType() - - total := 1 - for _, arg := range args { - // Because of our type checking function, we are guaranteed that - // all of the arguments are known, non-null values of types that - // support LengthInt. - total *= arg.LengthInt() - } - - if total == 0 { - // If any of the arguments was an empty collection then our result - // is also an empty collection, which we'll short-circuit here. 
- if retType.IsListType() { - return cty.ListValEmpty(ety), nil - } - return cty.SetValEmpty(ety), nil - } - - subEtys := ety.TupleElementTypes() - product := make([][]cty.Value, total) - - b := make([]cty.Value, total*len(args)) - n := make([]int, len(args)) - s := 0 - argVals := make([][]cty.Value, len(args)) - for i, arg := range args { - argVals[i] = arg.AsValueSlice() - } - - for i := range product { - e := s + len(args) - pi := b[s:e] - product[i] = pi - s = e - - for j, n := range n { - val := argVals[j][n] - ty := subEtys[j] - if !val.Type().Equals(ty) { - var err error - val, err = convert.Convert(val, ty) - if err != nil { - // Should never happen since we checked this in our - // type-checking function. - return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName()) - } - } - pi[j] = val - } - - for j := len(n) - 1; j >= 0; j-- { - n[j]++ - if n[j] < len(argVals[j]) { - break - } - n[j] = 0 - } - } - - productVals := make([]cty.Value, total) - for i, vals := range product { - productVals[i] = cty.TupleVal(vals) - } - - if retType.IsListType() { - return cty.ListVal(productVals), nil - } - return cty.SetVal(productVals), nil - }, -}) - -// SliceFunc constructs a function that extracts some consecutive elements -// from within a list. -var SliceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, - }, - { - Name: "start_index", - Type: cty.Number, - }, - { - Name: "end_index", - Type: cty.Number, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - arg := args[0] - argTy := arg.Type() - - if argTy.IsSetType() { - return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important") - } - if !argTy.IsListType() && !argTy.IsTupleType() { - return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value") - } - - startIndex, endIndex, idxsKnown, err := sliceIndexes(args) - if err != nil { - return cty.NilType, err - } - - if argTy.IsListType() { - return argTy, nil - } - - if !idxsKnown { - // If we don't know our start/end indices then we can't predict - // the result type if we're planning to return a tuple. - return cty.DynamicPseudoType, nil - } - return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputList := args[0] - - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - - // we ignore idxsKnown return value here because the indices are always - // known here, or else the call would've short-circuited. 
- startIndex, endIndex, _, err := sliceIndexes(args) - if err != nil { - return cty.NilVal, err - } - - if endIndex-startIndex == 0 { - if retType.IsTupleType() { - return cty.EmptyTupleVal, nil - } - return cty.ListValEmpty(retType.ElementType()), nil - } - - outputList := inputList.AsValueSlice()[startIndex:endIndex] - - if retType.IsTupleType() { - return cty.TupleVal(outputList), nil - } - - return cty.ListVal(outputList), nil - }, -}) - -func sliceIndexes(args []cty.Value) (int, int, bool, error) { - var startIndex, endIndex, length int - var startKnown, endKnown, lengthKnown bool - - if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known - length = args[0].LengthInt() - lengthKnown = true - } - - if args[1].IsKnown() { - if err := gocty.FromCtyValue(args[1], &startIndex); err != nil { - return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err) - } - if startIndex < 0 { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero") - } - if lengthKnown && startIndex > length { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list") - } - startKnown = true - } - if args[2].IsKnown() { - if err := gocty.FromCtyValue(args[2], &endIndex); err != nil { - return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err) - } - if endIndex < 0 { - return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero") - } - if lengthKnown && endIndex > length { - return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list") - } - endKnown = true - } - if startKnown && endKnown { - if startIndex > endIndex { - return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index") - } - } - return startIndex, endIndex, startKnown && endKnown, nil -} - -// TransposeFunc constructs a function that takes a map of lists of strings and -// swaps the keys and values to produce a new map of lists of strings. 
-var TransposeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.Map(cty.List(cty.String)), - }, - }, - Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - inputMap := args[0] - if !inputMap.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - outputMap := make(map[string]cty.Value) - tmpMap := make(map[string][]string) - - for it := inputMap.ElementIterator(); it.Next(); { - inKey, inVal := it.Element() - for iter := inVal.ElementIterator(); iter.Next(); { - _, val := iter.Element() - if !val.Type().Equals(cty.String) { - return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") - } - - outKey := val.AsString() - if _, ok := tmpMap[outKey]; !ok { - tmpMap[outKey] = make([]string, 0) - } - outVal := tmpMap[outKey] - outVal = append(outVal, inKey.AsString()) - sort.Strings(outVal) - tmpMap[outKey] = outVal - } - } - - for outKey, outVal := range tmpMap { - values := make([]cty.Value, 0) - for _, v := range outVal { - values = append(values, cty.StringVal(v)) - } - outputMap[outKey] = cty.ListVal(values) - } - - if len(outputMap) == 0 { - return cty.MapValEmpty(cty.List(cty.String)), nil - } - - return cty.MapVal(outputMap), nil - }, -}) - -// ValuesFunc constructs a function that returns a list of the map values, -// in the order of the sorted keys. -var ValuesFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "values", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - ty := args[0].Type() - if ty.IsMapType() { - return cty.List(ty.ElementType()), nil - } else if ty.IsObjectType() { - // The result is a tuple type with all of the same types as our - // object type's attributes, sorted in lexicographical order by the - // keys. (This matches the sort order guaranteed by ElementIterator - // on a cty object value.) - atys := ty.AttributeTypes() - if len(atys) == 0 { - return cty.EmptyTuple, nil - } - attrNames := make([]string, 0, len(atys)) - for name := range atys { - attrNames = append(attrNames, name) - } - sort.Strings(attrNames) - - tys := make([]cty.Type, len(attrNames)) - for i, name := range attrNames { - tys[i] = atys[name] - } - return cty.Tuple(tys), nil - } - return cty.NilType, errors.New("values() requires a map as the first argument") - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - mapVar := args[0] - - // We can just iterate the map/object value here because cty guarantees - // that these types always iterate in key lexicographical order. - var values []cty.Value - for it := mapVar.ElementIterator(); it.Next(); { - _, val := it.Element() - values = append(values, val) - } - - if retType.IsTupleType() { - return cty.TupleVal(values), nil - } - if len(values) == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - return cty.ListVal(values), nil - }, -}) - -// ZipmapFunc constructs a function that constructs a map from a list of keys -// and a corresponding list of values. 
-var ZipmapFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "keys", - Type: cty.List(cty.String), - }, - { - Name: "values", - Type: cty.DynamicPseudoType, - }, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - keys := args[0] - values := args[1] - valuesTy := values.Type() - - switch { - case valuesTy.IsListType(): - return cty.Map(values.Type().ElementType()), nil - case valuesTy.IsTupleType(): - if !keys.IsWhollyKnown() { - // Since zipmap with a tuple produces an object, we need to know - // all of the key names before we can predict our result type. - return cty.DynamicPseudoType, nil - } - - keysRaw := keys.AsValueSlice() - valueTypesRaw := valuesTy.TupleElementTypes() - if len(keysRaw) != len(valueTypesRaw) { - return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw)) - } - atys := make(map[string]cty.Type, len(valueTypesRaw)) - for i, keyVal := range keysRaw { - if keyVal.IsNull() { - return cty.NilType, fmt.Errorf("keys list has null value at index %d", i) - } - key := keyVal.AsString() - atys[key] = valueTypesRaw[i] - } - return cty.Object(atys), nil - - default: - return cty.NilType, errors.New("values argument must be a list or tuple value") - } - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - keys := args[0] - values := args[1] - - if !keys.IsWhollyKnown() { - // Unknown map keys and object attributes are not supported, so - // our entire result must be unknown in this case. - return cty.UnknownVal(retType), nil - } - - // both keys and values are guaranteed to be shallowly-known here, - // because our declared params above don't allow unknown or null values. - if keys.LengthInt() != values.LengthInt() { - return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt()) - } - - output := make(map[string]cty.Value) - - i := 0 - for it := keys.ElementIterator(); it.Next(); { - _, v := it.Element() - val := values.Index(cty.NumberIntVal(int64(i))) - output[v.AsString()] = val - i++ - } - - switch { - case retType.IsMapType(): - if len(output) == 0 { - return cty.MapValEmpty(retType.ElementType()), nil - } - return cty.MapVal(output), nil - case retType.IsObjectType(): - return cty.ObjectVal(output), nil - default: - // Should never happen because the type-check function should've - // caught any other case. - return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName()) - } - }, -}) - -// helper function to add an element to a list, if it does not already exist -func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { - for _, ele := range slice { - eq, err := stdlib.Equal(ele, element) - if err != nil { - return slice, err - } - if eq.True() { - return slice, nil - } - } - return append(slice, element), nil -} - -// Element returns a single element from a given list at the given index. If -// index is greater than the length of the list then it is wrapped modulo -// the list length. -func Element(list, index cty.Value) (cty.Value, error) { - return ElementFunc.Call([]cty.Value{list, index}) -} - -// Length returns the number of elements in the given collection or number of -// Unicode characters in the given string. 
-func Length(collection cty.Value) (cty.Value, error) { - return LengthFunc.Call([]cty.Value{collection}) -} - -// Coalesce takes any number of arguments and returns the first one that isn't empty. -func Coalesce(args ...cty.Value) (cty.Value, error) { - return CoalesceFunc.Call(args) -} - -// CoalesceList takes any number of list arguments and returns the first one that isn't empty. -func CoalesceList(args ...cty.Value) (cty.Value, error) { - return CoalesceListFunc.Call(args) -} - -// Compact takes a list of strings and returns a new list -// with any empty string elements removed. -func Compact(list cty.Value) (cty.Value, error) { - return CompactFunc.Call([]cty.Value{list}) -} - -// Contains determines whether a given list contains a given single value -// as one of its elements. -func Contains(list, value cty.Value) (cty.Value, error) { - return ContainsFunc.Call([]cty.Value{list, value}) -} - -// Index finds the element index for a given value in a list. -func Index(list, value cty.Value) (cty.Value, error) { - return IndexFunc.Call([]cty.Value{list, value}) -} - -// Distinct takes a list and returns a new list with any duplicate elements removed. -func Distinct(list cty.Value) (cty.Value, error) { - return DistinctFunc.Call([]cty.Value{list}) -} - -// Chunklist splits a single list into fixed-size chunks, returning a list of lists. -func Chunklist(list, size cty.Value) (cty.Value, error) { - return ChunklistFunc.Call([]cty.Value{list, size}) -} - -// Flatten takes a list and replaces any elements that are lists with a flattened -// sequence of the list contents. -func Flatten(list cty.Value) (cty.Value, error) { - return FlattenFunc.Call([]cty.Value{list}) -} - -// Keys takes a map and returns a sorted list of the map keys. -func Keys(inputMap cty.Value) (cty.Value, error) { - return KeysFunc.Call([]cty.Value{inputMap}) -} - -// List takes any number of list arguments and returns a list containing those -// values in the same order. -func List(args ...cty.Value) (cty.Value, error) { - return ListFunc.Call(args) -} - -// Lookup performs a dynamic lookup into a map. -// There are two required arguments, map and key, plus an optional default, -// which is a value to return if no key is found in map. -func Lookup(args ...cty.Value) (cty.Value, error) { - return LookupFunc.Call(args) -} - -// Map takes an even number of arguments and returns a map whose elements are constructed -// from consecutive pairs of arguments. -func Map(args ...cty.Value) (cty.Value, error) { - return MapFunc.Call(args) -} - -// Matchkeys constructs a new list by taking a subset of elements from one list -// whose indexes match the corresponding indexes of values in another list. -func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { - return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) -} - -// Merge takes an arbitrary number of maps and returns a single map that contains -// a merged set of elements from all of the maps. -// -// If more than one given map defines the same key then the one that is later in -// the argument sequence takes precedence. -func Merge(maps ...cty.Value) (cty.Value, error) { - return MergeFunc.Call(maps) -} - -// Reverse takes a sequence and produces a new sequence of the same length -// with all of the same elements as the given sequence but in reverse order. -func Reverse(list cty.Value) (cty.Value, error) { - return ReverseFunc.Call([]cty.Value{list}) -} - -// SetProduct computes the Cartesian product of sets or sequences. 
-func SetProduct(sets ...cty.Value) (cty.Value, error) { - return SetProductFunc.Call(sets) -} - -// Slice extracts some consecutive elements from within a list. -func Slice(list, start, end cty.Value) (cty.Value, error) { - return SliceFunc.Call([]cty.Value{list, start, end}) -} - -// Transpose takes a map of lists of strings and swaps the keys and values to -// produce a new map of lists of strings. -func Transpose(values cty.Value) (cty.Value, error) { - return TransposeFunc.Call([]cty.Value{values}) -} - -// Values returns a list of the map values, in the order of the sorted keys. -// This function only works on flat maps. -func Values(values cty.Value) (cty.Value, error) { - return ValuesFunc.Call([]cty.Value{values}) -} - -// Zipmap constructs a map from a list of keys and a corresponding list of values. -func Zipmap(keys, values cty.Value) (cty.Value, error) { - return ZipmapFunc.Call([]cty.Value{keys, values}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go deleted file mode 100644 index 83f85979..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -package funcs - -import ( - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeToFunc constructs a "to..." function, like "tostring", which converts -// its argument to a specific type or type kind. -// -// The given type wantTy can be any type constraint that cty's "convert" package -// would accept. In particular, this means that you can pass -// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which -// will then cause cty to attempt to unify all of the element types when given -// a tuple. -func MakeToFunc(wantTy cty.Type) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "v", - // We use DynamicPseudoType rather than wantTy here so that - // all values will pass through the function API verbatim and - // we can handle the conversion logic within the Type and - // Impl functions. This allows us to customize the error - // messages to be more appropriate for an explicit type - // conversion, whereas the cty function system produces - // messages aimed at _implicit_ type conversions. - Type: cty.DynamicPseudoType, - AllowNull: true, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - gotTy := args[0].Type() - if gotTy.Equals(wantTy) { - return wantTy, nil - } - conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) - if conv == nil { - // We'll use some specialized errors for some trickier cases, - // but most we can handle in a simple way. - switch { - case gotTy.IsTupleType() && wantTy.IsTupleType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - case gotTy.IsObjectType() && wantTy.IsObjectType(): - return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) - default: - return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - // If a conversion is available then everything is fine. 
- return wantTy, nil - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - // We didn't set "AllowUnknown" on our argument, so it is guaranteed - // to be known here but may still be null. - ret, err := convert.Convert(args[0], retType) - if err != nil { - // Because we used GetConversionUnsafe above, conversion can - // still potentially fail in here. For example, if the user - // asks to convert the string "a" to bool then we'll - // optimistically permit it during type checking but fail here - // once we note that the value isn't either "true" or "false". - gotTy := args[0].Type() - switch { - case gotTy == cty.String && wantTy == cty.Bool: - what := "string" - if !args[0].IsNull() { - what = strconv.Quote(args[0].AsString()) - } - return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) - case gotTy == cty.String && wantTy == cty.Number: - what := "string" - if !args[0].IsNull() { - what = strconv.Quote(args[0].AsString()) - } - return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) - default: - return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) - } - } - return ret, nil - }, - }) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go deleted file mode 100644 index 28074fb1..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go +++ /dev/null @@ -1,325 +0,0 @@ -package funcs - -import ( - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "fmt" - "hash" - - uuidv5 "github.com/google/uuid" - uuid "github.com/hashicorp/go-uuid" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" - "golang.org/x/crypto/bcrypt" -) - -var UUIDFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - result, err := uuid.GenerateUUID() - if err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.StringVal(result), nil - }, -}) - -var UUIDV5Func = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "namespace", - Type: cty.String, - }, - { - Name: "name", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var namespace uuidv5.UUID - switch { - case args[0].AsString() == "dns": - namespace = uuidv5.NameSpaceDNS - case args[0].AsString() == "url": - namespace = uuidv5.NameSpaceURL - case args[0].AsString() == "oid": - namespace = uuidv5.NameSpaceOID - case args[0].AsString() == "x500": - namespace = uuidv5.NameSpaceX500 - default: - if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) - } - } - val := args[1].AsString() - return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil - }, -}) - -// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string -// and encodes it with Base64. 
-var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) - -// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileBase64Sha256Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) -} - -// Base64Sha512Func constructs a function that computes the SHA256 hash of a given string -// and encodes it with Base64. -var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) - -// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileBase64Sha512Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) -} - -// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. -var BcryptFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "cost", - Type: cty.Number, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - defaultCost := 10 - - if len(args) > 1 { - var val int - if err := gocty.FromCtyValue(args[1], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - defaultCost = val - } - - if len(args) > 2 { - return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") - } - - input := args[0].AsString() - out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("error occured generating password %s", err.Error()) - } - - return cty.StringVal(string(out)), nil - }, -}) - -// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. -var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) - -// MakeFileMd5Func constructs a function that is like Md5Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileMd5Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) -} - -// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. -var RsaDecryptFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "ciphertext", - Type: cty.String, - }, - { - Name: "privatekey", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - s := args[0].AsString() - key := args[1].AsString() - - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s) - } - - block, _ := pem.Decode([]byte(key)) - if block == nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found") - } - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - return cty.UnknownVal(cty.String), fmt.Errorf( - "failed to parse key: password protected keys are not supported. 
Please decrypt the key prior to use", - ) - } - - x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(string(out)), nil - }, -}) - -// Sha1Func contructs a function that computes the SHA1 hash of a given string -// and encodes it with hexadecimal digits. -var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) - -// MakeFileSha1Func constructs a function that is like Sha1Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha1Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) -} - -// Sha256Func contructs a function that computes the SHA256 hash of a given string -// and encodes it with hexadecimal digits. -var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) - -// MakeFileSha256Func constructs a function that is like Sha256Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha256Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) -} - -// Sha512Func contructs a function that computes the SHA512 hash of a given string -// and encodes it with hexadecimal digits. -var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) - -// MakeFileSha512Func constructs a function that is like Sha512Func but reads the -// contents of a file rather than hashing a given literal string. -func MakeFileSha512Func(baseDir string) function.Function { - return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) -} - -func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - s := args[0].AsString() - h := hf() - h.Write([]byte(s)) - rv := enc(h.Sum(nil)) - return cty.StringVal(rv), nil - }, - }) -} - -func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - path := args[0].AsString() - src, err := readFileBytes(baseDir, path) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - h := hf() - h.Write(src) - rv := enc(h.Sum(nil)) - return cty.StringVal(rv), nil - }, - }) -} - -// UUID generates and returns a Type-4 UUID in the standard hexadecimal string -// format. -// -// This is not a pure function: it will generate a different result for each -// call. It must therefore be registered as an impure function in the function -// table in the "lang" package. -func UUID() (cty.Value, error) { - return UUIDFunc.Call(nil) -} - -// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string -// format. -func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { - return UUIDV5Func.Call([]cty.Value{namespace, name}) -} - -// Base64Sha256 computes the SHA256 hash of a given string and encodes it with -// Base64. 
-// -// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied -// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -func Base64Sha256(str cty.Value) (cty.Value, error) { - return Base64Sha256Func.Call([]cty.Value{str}) -} - -// Base64Sha512 computes the SHA512 hash of a given string and encodes it with -// Base64. -// -// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied -// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4 -func Base64Sha512(str cty.Value) (cty.Value, error) { - return Base64Sha512Func.Call([]cty.Value{str}) -} - -// Bcrypt computes a hash of the given string using the Blowfish cipher, -// returning a string in the Modular Crypt Format -// usually expected in the shadow password file on many Unix systems. -func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(cost)+1) - args[0] = str - copy(args[1:], cost) - return BcryptFunc.Call(args) -} - -// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits. -func Md5(str cty.Value) (cty.Value, error) { - return Md5Func.Call([]cty.Value{str}) -} - -// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding -// cleartext. -func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) { - return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey}) -} - -// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits. -func Sha1(str cty.Value) (cty.Value, error) { - return Sha1Func.Call([]cty.Value{str}) -} - -// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits. -func Sha256(str cty.Value) (cty.Value, error) { - return Sha256Func.Call([]cty.Value{str}) -} - -// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits. -func Sha512(str cty.Value) (cty.Value, error) { - return Sha512Func.Call([]cty.Value{str}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go deleted file mode 100644 index 5dae1987..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go +++ /dev/null @@ -1,70 +0,0 @@ -package funcs - -import ( - "time" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// TimestampFunc constructs a function that returns a string representation of the current date and time. -var TimestampFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil - }, -}) - -// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp. 
-var TimeAddFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "timestamp", - Type: cty.String, - }, - { - Name: "duration", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - ts, err := time.Parse(time.RFC3339, args[0].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), err - } - duration, err := time.ParseDuration(args[1].AsString()) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil - }, -}) - -// Timestamp returns a string representation of the current date and time. -// -// In the Terraform language, timestamps are conventionally represented as -// strings using RFC 3339 "Date and Time format" syntax, and so timestamp -// returns a string in this format. -func Timestamp() (cty.Value, error) { - return TimestampFunc.Call([]cty.Value{}) -} - -// TimeAdd adds a duration to a timestamp, returning a new timestamp. -// -// In the Terraform language, timestamps are conventionally represented as -// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires -// the timestamp argument to be a string conforming to this syntax. -// -// `duration` is a string representation of a time difference, consisting of -// sequences of number and unit pairs, like `"1.5h"` or `1h30m`. The accepted -// units are `ns`, `us` (or `µs`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first -// number may be negative to indicate a negative duration, like `"-2h5m"`. -// -// The result is a string, also in RFC 3339 format, representing the result -// of adding the given direction to the given timestamp. -func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) { - return TimeAddFunc.Call([]cty.Value{timestamp, duration}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go deleted file mode 100644 index af93f08d..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go +++ /dev/null @@ -1,140 +0,0 @@ -package funcs - -import ( - "bytes" - "compress/gzip" - "encoding/base64" - "fmt" - "log" - "net/url" - "unicode/utf8" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. -var Base64DecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - s := args[0].AsString() - sDec, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s) - } - if !utf8.Valid([]byte(sDec)) { - log.Printf("[DEBUG] the result of decoding the the provided string is not valid UTF-8: %s", sDec) - return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the the provided string is not valid UTF-8") - } - return cty.StringVal(string(sDec)), nil - }, -}) - -// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence. 
-var Base64EncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil - }, -}) - -// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in -// Base64 encoding. -var Base64GzipFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - s := args[0].AsString() - - var b bytes.Buffer - gz := gzip.NewWriter(&b) - if _, err := gz.Write([]byte(s)); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s) - } - if err := gz.Flush(); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s) - } - if err := gz.Close(); err != nil { - return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s) - } - return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil - }, -}) - -// URLEncodeFunc constructs a function that applies URL encoding to a given string. -var URLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(url.QueryEscape(args[0].AsString())), nil - }, -}) - -// Base64Decode decodes a string containing a base64 sequence. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will also interpret the resulting bytes as -// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function -// produces an error. -func Base64Decode(str cty.Value) (cty.Value, error) { - return Base64DecodeFunc.Call([]cty.Value{str}) -} - -// Base64Encode applies Base64 encoding to a string. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will first encode the characters from the string -// as UTF-8, and then apply Base64 encoding to the result. -func Base64Encode(str cty.Value) (cty.Value, error) { - return Base64EncodeFunc.Call([]cty.Value{str}) -} - -// Base64Gzip compresses a string with gzip and then encodes the result in -// Base64 encoding. -// -// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. -// -// Strings in the Terraform language are sequences of unicode characters rather -// than bytes, so this function will first encode the characters from the string -// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. -func Base64Gzip(str cty.Value) (cty.Value, error) { - return Base64GzipFunc.Call([]cty.Value{str}) -} - -// URLEncode applies URL encoding to a given string. -// -// This function identifies characters in the given string that would have a -// special meaning when included as a query string argument in a URL and -// escapes them using RFC 3986 "percent encoding". 
-// -// If the given string contains non-ASCII characters, these are first encoded as -// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. -func URLEncode(str cty.Value) (cty.Value, error) { - return URLEncodeFunc.Call([]cty.Value{str}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go deleted file mode 100644 index 4b899cbc..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go +++ /dev/null @@ -1,439 +0,0 @@ -package funcs - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "unicode/utf8" - - "github.com/bmatcuk/doublestar" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - homedir "github.com/mitchellh/go-homedir" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// MakeFileFunc constructs a function that takes a file path and returns the -// contents of that file, either directly as a string (where valid UTF-8 is -// required) or as a string containing base64 bytes. -func MakeFileFunc(baseDir string, encBase64 bool) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - path := args[0].AsString() - src, err := readFileBytes(baseDir, path) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - switch { - case encBase64: - enc := base64.StdEncoding.EncodeToString(src) - return cty.StringVal(enc), nil - default: - if !utf8.Valid(src) { - return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) - } - return cty.StringVal(string(src)), nil - } - }, - }) -} - -// MakeTemplateFileFunc constructs a function that takes a file path and -// an arbitrary object of named values and attempts to render the referenced -// file as a template using HCL template syntax. -// -// The template itself may recursively call other functions so a callback -// must be provided to get access to those functions. The template cannot, -// however, access any variables defined in the scope: it is restricted only to -// those variables provided in the second function argument, to ensure that all -// dependencies on other graph nodes can be seen before executing this function. -// -// As a special exception, a referenced template file may not recursively call -// the templatefile function, since that would risk the same file being -// included into itself indefinitely. -func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { - - params := []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - { - Name: "vars", - Type: cty.DynamicPseudoType, - }, - } - - loadTmpl := func(fn string) (hcl.Expression, error) { - // We re-use File here to ensure the same filename interpretation - // as it does, along with its other safety checks. 
- tmplVal, err := File(baseDir, cty.StringVal(fn)) - if err != nil { - return nil, err - } - - expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1}) - if diags.HasErrors() { - return nil, diags - } - - return expr, nil - } - - renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) { - if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { - return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time - } - - ctx := &hcl.EvalContext{ - Variables: varsVal.AsValueMap(), - } - - // We'll pre-check references in the template here so we can give a - // more specialized error message than HCL would by default, so it's - // clearer that this problem is coming from a templatefile call. - for _, traversal := range expr.Variables() { - root := traversal.RootName() - if _, ok := ctx.Variables[root]; !ok { - return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()) - } - } - - givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems - funcs := make(map[string]function.Function, len(givenFuncs)) - for name, fn := range givenFuncs { - if name == "templatefile" { - // We stub this one out to prevent recursive calls. - funcs[name] = function.New(&function.Spec{ - Params: params, - Type: func(args []cty.Value) (cty.Type, error) { - return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call") - }, - }) - continue - } - funcs[name] = fn - } - ctx.Functions = funcs - - val, diags := expr.Value(ctx) - if diags.HasErrors() { - return cty.DynamicVal, diags - } - return val, nil - } - - return function.New(&function.Spec{ - Params: params, - Type: func(args []cty.Value) (cty.Type, error) { - if !(args[0].IsKnown() && args[1].IsKnown()) { - return cty.DynamicPseudoType, nil - } - - // We'll render our template now to see what result type it produces. - // A template consisting only of a single interpolation an potentially - // return any type. - expr, err := loadTmpl(args[0].AsString()) - if err != nil { - return cty.DynamicPseudoType, err - } - - // This is safe even if args[1] contains unknowns because the HCL - // template renderer itself knows how to short-circuit those. 
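// Illustrative sketch, not part of the diff above: how the templatefile
// implementation removed here was wired together. The template path, its
// assumed contents ("Hello, ${name}!"), and the empty extra-function table
// are hypothetical; real callers pass the scope's full function map.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/lang/funcs"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	// Build templatefile rooted at the current directory, with no extra functions.
	tf := funcs.MakeTemplateFileFunc(".", func() map[string]function.Function {
		return map[string]function.Function{}
	})

	// templatefile("greeting.tmpl", { name = "world" })
	out, err := tf.Call([]cty.Value{
		cty.StringVal("greeting.tmpl"),
		cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("world")}),
	})
	if err != nil {
		panic(err)
	}
	// Prints "Hello, world!" if greeting.tmpl holds the assumed template.
	fmt.Println(out.AsString())
}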
- val, err := renderTmpl(expr, args[1]) - return val.Type(), err - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - expr, err := loadTmpl(args[0].AsString()) - if err != nil { - return cty.DynamicVal, err - } - return renderTmpl(expr, args[1]) - }, - }) - -} - -// MakeFileExistsFunc constructs a function that takes a path -// and determines whether a file exists at that path -func MakeFileExistsFunc(baseDir string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.Bool), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - path := args[0].AsString() - path, err := homedir.Expand(path) - if err != nil { - return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err) - } - - if !filepath.IsAbs(path) { - path = filepath.Join(baseDir, path) - } - - // Ensure that the path is canonical for the host OS - path = filepath.Clean(path) - - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return cty.False, nil - } - return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path) - } - - if fi.Mode().IsRegular() { - return cty.True, nil - } - - return cty.False, fmt.Errorf("%s is not a regular file, but %q", - path, fi.Mode().String()) - }, - }) -} - -// MakeFileSetFunc constructs a function that takes a glob pattern -// and enumerates a file set from that pattern -func MakeFileSetFunc(baseDir string) function.Function { - return function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - { - Name: "pattern", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.Set(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - path := args[0].AsString() - pattern := args[1].AsString() - - if !filepath.IsAbs(path) { - path = filepath.Join(baseDir, path) - } - - // Join the path to the glob pattern, while ensuring the full - // pattern is canonical for the host OS. The joined path is - // automatically cleaned during this operation. - pattern = filepath.Join(path, pattern) - - matches, err := doublestar.Glob(pattern) - if err != nil { - return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern (%s): %s", pattern, err) - } - - var matchVals []cty.Value - for _, match := range matches { - fi, err := os.Stat(match) - - if err != nil { - return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat (%s): %s", match, err) - } - - if !fi.Mode().IsRegular() { - continue - } - - // Remove the path and file separator from matches. - match, err = filepath.Rel(path, match) - - if err != nil { - return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match (%s): %s", match, err) - } - - // Replace any remaining file separators with forward slash (/) - // separators for cross-system compatibility. - match = filepath.ToSlash(match) - - matchVals = append(matchVals, cty.StringVal(match)) - } - - if len(matchVals) == 0 { - return cty.SetValEmpty(cty.String), nil - } - - return cty.SetVal(matchVals), nil - }, - }) -} - -// BasenameFunc constructs a function that takes a string containing a filesystem path -// and removes all except the last portion from it. 
-var BasenameFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(filepath.Base(args[0].AsString())), nil - }, -}) - -// DirnameFunc constructs a function that takes a string containing a filesystem path -// and removes the last portion from it. -var DirnameFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(filepath.Dir(args[0].AsString())), nil - }, -}) - -// AbsPathFunc constructs a function that converts a filesystem path to an absolute path -var AbsPathFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - absPath, err := filepath.Abs(args[0].AsString()) - return cty.StringVal(filepath.ToSlash(absPath)), err - }, -}) - -// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. -var PathExpandFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "path", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - - homePath, err := homedir.Expand(args[0].AsString()) - return cty.StringVal(homePath), err - }, -}) - -func readFileBytes(baseDir, path string) ([]byte, error) { - path, err := homedir.Expand(path) - if err != nil { - return nil, fmt.Errorf("failed to expand ~: %s", err) - } - - if !filepath.IsAbs(path) { - path = filepath.Join(baseDir, path) - } - - // Ensure that the path is canonical for the host OS - path = filepath.Clean(path) - - src, err := ioutil.ReadFile(path) - if err != nil { - // ReadFile does not return Terraform-user-friendly error - // messages, so we'll provide our own. - if os.IsNotExist(err) { - return nil, fmt.Errorf("no file exists at %s", path) - } - return nil, fmt.Errorf("failed to read %s", path) - } - - return src, nil -} - -// File reads the contents of the file at the given path. -// -// The file must contain valid UTF-8 bytes, or this function will return an error. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func File(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileFunc(baseDir, false) - return fn.Call([]cty.Value{path}) -} - -// FileExists determines whether a file exists at the given path. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. 
-func FileExists(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileExistsFunc(baseDir) - return fn.Call([]cty.Value{path}) -} - -// FileSet enumerates a set of files given a glob pattern -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) { - fn := MakeFileSetFunc(baseDir) - return fn.Call([]cty.Value{path, pattern}) -} - -// FileBase64 reads the contents of the file at the given path. -// -// The bytes from the file are encoded as base64 before returning. -// -// The underlying function implementation works relative to a particular base -// directory, so this wrapper takes a base directory string and uses it to -// construct the underlying function before calling it. -func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { - fn := MakeFileFunc(baseDir, true) - return fn.Call([]cty.Value{path}) -} - -// Basename takes a string containing a filesystem path and removes all except the last portion from it. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the path is empty then the result is ".", representing the current working directory. -func Basename(path cty.Value) (cty.Value, error) { - return BasenameFunc.Call([]cty.Value{path}) -} - -// Dirname takes a string containing a filesystem path and removes the last portion from it. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the path is empty then the result is ".", representing the current working directory. -func Dirname(path cty.Value) (cty.Value, error) { - return DirnameFunc.Call([]cty.Value{path}) -} - -// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with -// the current user's home directory path. -// -// The underlying function implementation works only with the path string and does not access the filesystem itself. -// It is therefore unable to take into account filesystem features such as symlinks. -// -// If the leading segment in the path is not `~` then the given path is returned unmodified. -func Pathexpand(path cty.Value) (cty.Value, error) { - return PathExpandFunc.Call([]cty.Value{path}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go deleted file mode 100644 index c813f47b..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go +++ /dev/null @@ -1,217 +0,0 @@ -package funcs - -import ( - "math" - "math/big" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -// CeilFunc contructs a function that returns the closest whole number greater -// than or equal to the given value. 
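// Illustrative sketch, not part of the diff above: the filesystem helpers
// removed here. The base directory and the file and glob paths are
// hypothetical.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// fileexists("main.tf"), resolved relative to the base directory ".".
	exists, err := funcs.FileExists(".", cty.StringVal("main.tf"))
	if err != nil {
		panic(err)
	}
	fmt.Println(exists.True())

	// fileset(".", "**/*.tf") -> set of matching paths, relative to ".".
	matches, _ := funcs.FileSet(".", cty.StringVal("."), cty.StringVal("**/*.tf"))
	fmt.Println(matches.LengthInt())

	// Pure path manipulation; no filesystem access involved.
	dir, _ := funcs.Dirname(cty.StringVal("modules/iam/main.tf"))
	base, _ := funcs.Basename(cty.StringVal("modules/iam/main.tf"))
	fmt.Println(dir.AsString(), base.AsString())
}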
-var CeilFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var val float64 - if err := gocty.FromCtyValue(args[0], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.NumberIntVal(int64(math.Ceil(val))), nil - }, -}) - -// FloorFunc constructs a function that returns the closest whole number less -// than or equal to the given value. -var FloorFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var val float64 - if err := gocty.FromCtyValue(args[0], &val); err != nil { - return cty.UnknownVal(cty.String), err - } - return cty.NumberIntVal(int64(math.Floor(val))), nil - }, -}) - -// LogFunc constructs a function that returns the logarithm of a given number in a given base. -var LogFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - { - Name: "base", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var num float64 - if err := gocty.FromCtyValue(args[0], &num); err != nil { - return cty.UnknownVal(cty.String), err - } - - var base float64 - if err := gocty.FromCtyValue(args[1], &base); err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil - }, -}) - -// PowFunc constructs a function that returns the given number raised to the given power. -var PowFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - { - Name: "power", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var num float64 - if err := gocty.FromCtyValue(args[0], &num); err != nil { - return cty.UnknownVal(cty.String), err - } - - var power float64 - if err := gocty.FromCtyValue(args[1], &power); err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.NumberFloatVal(math.Pow(num, power)), nil - }, -}) - -// SignumFunc constructs a function that determines the sign of a given number, -// returning -1, 0, or 1. -var SignumFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "num", - Type: cty.Number, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var num int - if err := gocty.FromCtyValue(args[0], &num); err != nil { - return cty.UnknownVal(cty.String), err - } - switch { - case num < 0: - return cty.NumberIntVal(-1), nil - case num > 0: - return cty.NumberIntVal(+1), nil - default: - return cty.NumberIntVal(0), nil - } - }, -}) - -// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base.
-var ParseIntFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "number", - Type: cty.DynamicPseudoType, - }, - { - Name: "base", - Type: cty.Number, - }, - }, - - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].Type().Equals(cty.String) { - return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName()) - } - return cty.Number, nil - }, - - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - var numstr string - var base int - var err error - - if err = gocty.FromCtyValue(args[0], &numstr); err != nil { - return cty.UnknownVal(cty.String), function.NewArgError(0, err) - } - - if err = gocty.FromCtyValue(args[1], &base); err != nil { - return cty.UnknownVal(cty.Number), function.NewArgError(1, err) - } - - if base < 2 || base > 62 { - return cty.UnknownVal(cty.Number), function.NewArgErrorf( - 1, - "base must be a whole number between 2 and 62 inclusive", - ) - } - - num, ok := (&big.Int{}).SetString(numstr, base) - if !ok { - return cty.UnknownVal(cty.Number), function.NewArgErrorf( - 0, - "cannot parse %q as a base %d integer", - numstr, - base, - ) - } - - parsedNum := cty.NumberVal((&big.Float{}).SetInt(num)) - - return parsedNum, nil - }, -}) - -// Ceil returns the closest whole number greater than or equal to the given value. -func Ceil(num cty.Value) (cty.Value, error) { - return CeilFunc.Call([]cty.Value{num}) -} - -// Floor returns the closest whole number less than or equal to the given value. -func Floor(num cty.Value) (cty.Value, error) { - return FloorFunc.Call([]cty.Value{num}) -} - -// Log returns the logarithm of a given number in a given base. -func Log(num, base cty.Value) (cty.Value, error) { - return LogFunc.Call([]cty.Value{num, base}) -} - -// Pow returns the given number raised to the given power. -func Pow(num, power cty.Value) (cty.Value, error) { - return PowFunc.Call([]cty.Value{num, power}) -} - -// Signum determines the sign of a number, returning a number between -1 and -// 1 to represent the sign. -func Signum(num cty.Value) (cty.Value, error) { - return SignumFunc.Call([]cty.Value{num}) -} - -// ParseInt parses a string argument and returns an integer of the specified base.
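// Illustrative sketch, not part of the diff above: the numeric helpers removed
// here, called with hypothetical input values.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// ceil(4.1) -> 5 and floor(4.9) -> 4.
	up, _ := funcs.Ceil(cty.NumberFloatVal(4.1))
	down, _ := funcs.Floor(cty.NumberFloatVal(4.9))
	fmt.Println(up.AsBigFloat(), down.AsBigFloat())

	// parseint("FF", 16) -> 255, invoked through the underlying function spec.
	parsed, err := funcs.ParseIntFunc.Call([]cty.Value{
		cty.StringVal("FF"),
		cty.NumberIntVal(16),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.AsBigFloat())
}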
-func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) { - return ParseIntFunc.Call([]cty.Value{num, base}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go deleted file mode 100644 index 2e66be45..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go +++ /dev/null @@ -1,358 +0,0 @@ -package funcs - -import ( - "fmt" - "regexp" - "sort" - "strings" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/gocty" -) - -var JoinFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "separator", - Type: cty.String, - }, - }, - VarParam: &function.Parameter{ - Name: "lists", - Type: cty.List(cty.String), - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - sep := args[0].AsString() - listVals := args[1:] - if len(listVals) < 1 { - return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required") - } - - l := 0 - for _, list := range listVals { - if !list.IsWhollyKnown() { - return cty.UnknownVal(cty.String), nil - } - l += list.LengthInt() - } - - items := make([]string, 0, l) - for ai, list := range listVals { - ei := 0 - for it := list.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.IsNull() { - if len(listVals) > 1 { - return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1) - } - return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei) - } - items = append(items, val.AsString()) - ei++ - } - } - - return cty.StringVal(strings.Join(items, sep)), nil - }, -}) - -var SortFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.List(cty.String), - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - listVal := args[0] - - if !listVal.IsWhollyKnown() { - // If some of the element values aren't known yet then we - // can't yet predict the order of the result. 
- return cty.UnknownVal(retType), nil - } - if listVal.LengthInt() == 0 { // Easy path - return listVal, nil - } - - list := make([]string, 0, listVal.LengthInt()) - for it := listVal.ElementIterator(); it.Next(); { - iv, v := it.Element() - if v.IsNull() { - return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String()) - } - list = append(list, v.AsString()) - } - - sort.Strings(list) - retVals := make([]cty.Value, len(list)) - for i, s := range list { - retVals[i] = cty.StringVal(s) - } - return cty.ListVal(retVals), nil - }, -}) - -var SplitFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "separator", - Type: cty.String, - }, - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - sep := args[0].AsString() - str := args[1].AsString() - elems := strings.Split(str, sep) - elemVals := make([]cty.Value, len(elems)) - for i, s := range elems { - elemVals[i] = cty.StringVal(s) - } - if len(elemVals) == 0 { - return cty.ListValEmpty(cty.String), nil - } - return cty.ListVal(elemVals), nil - }, -}) - -// ChompFunc constructs a function that removes newline characters at the end of a string. -var ChompFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`) - return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil - }, -}) - -// IndentFunc constructs a function that adds a given number of spaces to the -// beginnings of all but the first line in a given multi-line string. -var IndentFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "spaces", - Type: cty.Number, - }, - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var spaces int - if err := gocty.FromCtyValue(args[0], &spaces); err != nil { - return cty.UnknownVal(cty.String), err - } - data := args[1].AsString() - pad := strings.Repeat(" ", spaces) - return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil - }, -}) - -// ReplaceFunc constructs a function that searches a given string for another -// given substring, and replaces each occurence with a given replacement string. -var ReplaceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - { - Name: "substr", - Type: cty.String, - }, - { - Name: "replace", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - str := args[0].AsString() - substr := args[1].AsString() - replace := args[2].AsString() - - // We search/replace using a regexp if the string is surrounded - // in forward slashes. 
- if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { - re, err := regexp.Compile(substr[1 : len(substr)-1]) - if err != nil { - return cty.UnknownVal(cty.String), err - } - - return cty.StringVal(re.ReplaceAllString(str, replace)), nil - } - - return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil - }, -}) - -// TitleFunc constructs a function that converts the first letter of each word -// in the given string to uppercase. -var TitleFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - return cty.StringVal(strings.Title(args[0].AsString())), nil - }, -}) - -// TrimSpaceFunc constructs a function that removes any space characters from -// the start and end of the given string. -var TrimSpaceFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil - }, -}) - -// TrimFunc constructs a function that removes the specified characters from -// the start and end of the given string. -var TrimFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - { - Name: "cutset", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - str := args[0].AsString() - cutset := args[1].AsString() - return cty.StringVal(strings.Trim(str, cutset)), nil - }, -}) - -// TrimPrefixFunc constructs a function that removes the specified characters from -// the start the given string. -var TrimPrefixFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - { - Name: "prefix", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - str := args[0].AsString() - prefix := args[1].AsString() - return cty.StringVal(strings.TrimPrefix(str, prefix)), nil - }, -}) - -// TrimSuffixFunc constructs a function that removes the specified characters from -// the end of the given string. -var TrimSuffixFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "str", - Type: cty.String, - }, - { - Name: "suffix", - Type: cty.String, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - str := args[0].AsString() - cutset := args[1].AsString() - return cty.StringVal(strings.TrimSuffix(str, cutset)), nil - }, -}) - -// Join concatenates together the string elements of one or more lists with a -// given separator. -func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) { - args := make([]cty.Value, len(lists)+1) - args[0] = sep - copy(args[1:], lists) - return JoinFunc.Call(args) -} - -// Sort re-orders the elements of a given list of strings so that they are -// in ascending lexicographical order. -func Sort(list cty.Value) (cty.Value, error) { - return SortFunc.Call([]cty.Value{list}) -} - -// Split divides a given string by a given separator, returning a list of -// strings containing the characters between the separator sequences. 
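// Illustrative sketch, not part of the diff above: the string helpers removed
// here, called with hypothetical input values.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// join(", ", ["a", "b", "c"]) -> "a, b, c"
	joined, _ := funcs.Join(
		cty.StringVal(", "),
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c")}),
	)
	fmt.Println(joined.AsString())

	// replace("hello", "/l+/", "L") -> "heLo"; a /.../ pattern is treated as a regexp.
	replaced, err := funcs.ReplaceFunc.Call([]cty.Value{
		cty.StringVal("hello"),
		cty.StringVal("/l+/"),
		cty.StringVal("L"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(replaced.AsString())
}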
-func Split(sep, str cty.Value) (cty.Value, error) { - return SplitFunc.Call([]cty.Value{sep, str}) -} - -// Chomp removes newline characters at the end of a string. -func Chomp(str cty.Value) (cty.Value, error) { - return ChompFunc.Call([]cty.Value{str}) -} - -// Indent adds a given number of spaces to the beginnings of all but the first -// line in a given multi-line string. -func Indent(spaces, str cty.Value) (cty.Value, error) { - return IndentFunc.Call([]cty.Value{spaces, str}) -} - -// Replace searches a given string for another given substring, -// and replaces all occurences with a given replacement string. -func Replace(str, substr, replace cty.Value) (cty.Value, error) { - return ReplaceFunc.Call([]cty.Value{str, substr, replace}) -} - -// Title converts the first letter of each word in the given string to uppercase. -func Title(str cty.Value) (cty.Value, error) { - return TitleFunc.Call([]cty.Value{str}) -} - -// TrimSpace removes any space characters from the start and end of the given string. -func TrimSpace(str cty.Value) (cty.Value, error) { - return TrimSpaceFunc.Call([]cty.Value{str}) -} - -// Trim removes the specified characters from the start and end of the given string. -func Trim(str, cutset cty.Value) (cty.Value, error) { - return TrimFunc.Call([]cty.Value{str, cutset}) -} - -// TrimPrefix removes the specified prefix from the start of the given string. -func TrimPrefix(str, prefix cty.Value) (cty.Value, error) { - return TrimPrefixFunc.Call([]cty.Value{str, prefix}) -} - -// TrimSuffix removes the specified suffix from the end of the given string. -func TrimSuffix(str, suffix cty.Value) (cty.Value, error) { - return TrimSuffixFunc.Call([]cty.Value{str, suffix}) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go deleted file mode 100644 index dabffb66..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/functions.go +++ /dev/null @@ -1,165 +0,0 @@ -package lang - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/ext/tryfunc" - ctyyaml "github.com/zclconf/go-cty-yaml" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" - "github.com/zclconf/go-cty/cty/function/stdlib" - - "github.com/hashicorp/terraform/lang/funcs" -) - -var impureFunctions = []string{ - "bcrypt", - "timestamp", - "uuid", -} - -// Functions returns the set of functions that should be used to when evaluating -// expressions in the receiving scope. -func (s *Scope) Functions() map[string]function.Function { - s.funcsLock.Lock() - if s.funcs == nil { - // Some of our functions are just directly the cty stdlib functions. - // Others are implemented in the subdirectory "funcs" here in this - // repository. New functions should generally start out their lives - // in the "funcs" directory and potentially graduate to cty stdlib - // later if the functionality seems to be something domain-agnostic - // that would be useful to all applications using cty functions. 
- - s.funcs = map[string]function.Function{ - "abs": stdlib.AbsoluteFunc, - "abspath": funcs.AbsPathFunc, - "basename": funcs.BasenameFunc, - "base64decode": funcs.Base64DecodeFunc, - "base64encode": funcs.Base64EncodeFunc, - "base64gzip": funcs.Base64GzipFunc, - "base64sha256": funcs.Base64Sha256Func, - "base64sha512": funcs.Base64Sha512Func, - "bcrypt": funcs.BcryptFunc, - "can": tryfunc.CanFunc, - "ceil": funcs.CeilFunc, - "chomp": funcs.ChompFunc, - "cidrhost": funcs.CidrHostFunc, - "cidrnetmask": funcs.CidrNetmaskFunc, - "cidrsubnet": funcs.CidrSubnetFunc, - "cidrsubnets": funcs.CidrSubnetsFunc, - "coalesce": funcs.CoalesceFunc, - "coalescelist": funcs.CoalesceListFunc, - "compact": funcs.CompactFunc, - "concat": stdlib.ConcatFunc, - "contains": funcs.ContainsFunc, - "csvdecode": stdlib.CSVDecodeFunc, - "dirname": funcs.DirnameFunc, - "distinct": funcs.DistinctFunc, - "element": funcs.ElementFunc, - "chunklist": funcs.ChunklistFunc, - "file": funcs.MakeFileFunc(s.BaseDir, false), - "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), - "fileset": funcs.MakeFileSetFunc(s.BaseDir), - "filebase64": funcs.MakeFileFunc(s.BaseDir, true), - "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), - "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), - "filemd5": funcs.MakeFileMd5Func(s.BaseDir), - "filesha1": funcs.MakeFileSha1Func(s.BaseDir), - "filesha256": funcs.MakeFileSha256Func(s.BaseDir), - "filesha512": funcs.MakeFileSha512Func(s.BaseDir), - "flatten": funcs.FlattenFunc, - "floor": funcs.FloorFunc, - "format": stdlib.FormatFunc, - "formatdate": stdlib.FormatDateFunc, - "formatlist": stdlib.FormatListFunc, - "indent": funcs.IndentFunc, - "index": funcs.IndexFunc, - "join": funcs.JoinFunc, - "jsondecode": stdlib.JSONDecodeFunc, - "jsonencode": stdlib.JSONEncodeFunc, - "keys": funcs.KeysFunc, - "length": funcs.LengthFunc, - "list": funcs.ListFunc, - "log": funcs.LogFunc, - "lookup": funcs.LookupFunc, - "lower": stdlib.LowerFunc, - "map": funcs.MapFunc, - "matchkeys": funcs.MatchkeysFunc, - "max": stdlib.MaxFunc, - "md5": funcs.Md5Func, - "merge": funcs.MergeFunc, - "min": stdlib.MinFunc, - "parseint": funcs.ParseIntFunc, - "pathexpand": funcs.PathExpandFunc, - "pow": funcs.PowFunc, - "range": stdlib.RangeFunc, - "regex": stdlib.RegexFunc, - "regexall": stdlib.RegexAllFunc, - "replace": funcs.ReplaceFunc, - "reverse": funcs.ReverseFunc, - "rsadecrypt": funcs.RsaDecryptFunc, - "setintersection": stdlib.SetIntersectionFunc, - "setproduct": funcs.SetProductFunc, - "setsubtract": stdlib.SetSubtractFunc, - "setunion": stdlib.SetUnionFunc, - "sha1": funcs.Sha1Func, - "sha256": funcs.Sha256Func, - "sha512": funcs.Sha512Func, - "signum": funcs.SignumFunc, - "slice": funcs.SliceFunc, - "sort": funcs.SortFunc, - "split": funcs.SplitFunc, - "strrev": stdlib.ReverseFunc, - "substr": stdlib.SubstrFunc, - "timestamp": funcs.TimestampFunc, - "timeadd": funcs.TimeAddFunc, - "title": funcs.TitleFunc, - "tostring": funcs.MakeToFunc(cty.String), - "tonumber": funcs.MakeToFunc(cty.Number), - "tobool": funcs.MakeToFunc(cty.Bool), - "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), - "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), - "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), - "transpose": funcs.TransposeFunc, - "trim": funcs.TrimFunc, - "trimprefix": funcs.TrimPrefixFunc, - "trimspace": funcs.TrimSpaceFunc, - "trimsuffix": funcs.TrimSuffixFunc, - "try": tryfunc.TryFunc, - "upper": stdlib.UpperFunc, - "urlencode": funcs.URLEncodeFunc, - "uuid": 
funcs.UUIDFunc, - "uuidv5": funcs.UUIDV5Func, - "values": funcs.ValuesFunc, - "yamldecode": ctyyaml.YAMLDecodeFunc, - "yamlencode": ctyyaml.YAMLEncodeFunc, - "zipmap": funcs.ZipmapFunc, - } - - s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { - // The templatefile function prevents recursive calls to itself - // by copying this map and overwriting the "templatefile" entry. - return s.funcs - }) - - if s.PureOnly { - // Force our few impure functions to return unknown so that we - // can defer evaluating them until a later pass. - for _, name := range impureFunctions { - s.funcs[name] = function.Unpredictable(s.funcs[name]) - } - } - } - s.funcsLock.Unlock() - - return s.funcs -} - -var unimplFunc = function.New(&function.Spec{ - Type: func([]cty.Value) (cty.Type, error) { - return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented") - }, - Impl: func([]cty.Value, cty.Type) (cty.Value, error) { - return cty.DynamicVal, fmt.Errorf("function not yet implemented") - }, -}) diff --git a/vendor/github.com/hashicorp/terraform/lang/references.go b/vendor/github.com/hashicorp/terraform/lang/references.go deleted file mode 100644 index 569251cb..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/references.go +++ /dev/null @@ -1,81 +0,0 @@ -package lang - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang/blocktoattr" - "github.com/hashicorp/terraform/tfdiags" -) - -// References finds all of the references in the given set of traversals, -// returning diagnostics if any of the traversals cannot be interpreted as a -// reference. -// -// This function does not do any de-duplication of references, since references -// have source location information embedded in them and so any invalid -// references that are duplicated should have errors reported for each -// occurence. -// -// If the returned diagnostics contains errors then the result may be -// incomplete or invalid. Otherwise, the returned slice has one reference per -// given traversal, though it is not guaranteed that the references will -// appear in the same order as the given traversals. -func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { - if len(traversals) == 0 { - return nil, nil - } - - var diags tfdiags.Diagnostics - refs := make([]*addrs.Reference, 0, len(traversals)) - - for _, traversal := range traversals { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if ref == nil { - continue - } - refs = append(refs, ref) - } - - return refs, diags -} - -// ReferencesInBlock is a helper wrapper around References that first searches -// the given body for traversals, before converting those traversals to -// references. -// -// A block schema must be provided so that this function can determine where in -// the body variables are expected. 
-func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { - if body == nil { - return nil, nil - } - - // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or - // dynblock.VariablesHCLDec here because when we evaluate a block we'll - // first apply the dynamic block extension and _then_ the blocktoattr - // transform, and so blocktoattr.ExpandedVariables takes into account - // both of those transforms when it analyzes the body to ensure we find - // all of the references as if they'd already moved into their final - // locations, even though we can't expand dynamic blocks yet until we - // already know which variables are required. - // - // The set of cases we want to detect here is covered by the tests for - // the plan graph builder in the main 'terraform' package, since it's - // in a better position to test this due to having mock providers etc - // available. - traversals := blocktoattr.ExpandedVariables(body, schema) - return References(traversals) -} - -// ReferencesInExpr is a helper wrapper around References that first searches -// the given expression for traversals, before converting those traversals -// to references. -func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { - if expr == nil { - return nil, nil - } - traversals := expr.Variables() - return References(traversals) -} diff --git a/vendor/github.com/hashicorp/terraform/lang/scope.go b/vendor/github.com/hashicorp/terraform/lang/scope.go deleted file mode 100644 index 98fca6ba..00000000 --- a/vendor/github.com/hashicorp/terraform/lang/scope.go +++ /dev/null @@ -1,34 +0,0 @@ -package lang - -import ( - "sync" - - "github.com/zclconf/go-cty/cty/function" - - "github.com/hashicorp/terraform/addrs" -) - -// Scope is the main type in this package, allowing dynamic evaluation of -// blocks and expressions based on some contextual information that informs -// which variables and functions will be available. -type Scope struct { - // Data is used to resolve references in expressions. - Data Data - - // SelfAddr is the address that the "self" object should be an alias of, - // or nil if the "self" object should not be available at all. - SelfAddr addrs.Referenceable - - // BaseDir is the base directory used by any interpolation functions that - // accept filesystem paths as arguments. - BaseDir string - - // PureOnly can be set to true to request that any non-pure functions - // produce unknown value results rather than actually executing. This is - // important during a plan phase to avoid generating results that could - // then differ during apply. - PureOnly bool - - funcs map[string]function.Function - funcsLock sync.Mutex -} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go deleted file mode 100644 index 87c8431e..00000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go +++ /dev/null @@ -1,43 +0,0 @@ -package moduledeps - -import ( - "github.com/hashicorp/terraform/plugin/discovery" -) - -// Providers describes a set of provider dependencies for a given module. -// -// Each named provider instance can have one version constraint. -type Providers map[ProviderInstance]ProviderDependency - -// ProviderDependency describes the dependency for a particular provider -// instance, including both the set of allowed versions and the reason for -// the dependency. 
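// Illustrative sketch, not part of the diff above: how the lang.Scope removed
// here exposed its function table. Constructing a Scope with only BaseDir and
// PureOnly set is a simplification; real callers also provide Data and
// SelfAddr.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/lang"
)

func main() {
	scope := &lang.Scope{
		BaseDir:  ".",
		PureOnly: true, // impure functions (bcrypt, timestamp, uuid) become unpredictable
	}

	fns := scope.Functions()
	fmt.Println(len(fns)) // the full function table for this Terraform version

	// With PureOnly set, timestamp() yields an unknown value instead of executing.
	v, err := fns["timestamp"].Call(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsKnown()) // false
}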
-type ProviderDependency struct { - Constraints discovery.Constraints - Reason ProviderDependencyReason -} - -// ProviderDependencyReason is an enumeration of reasons why a dependency might be -// present. -type ProviderDependencyReason int - -const ( - // ProviderDependencyExplicit means that there is an explicit "provider" - // block in the configuration for this module. - ProviderDependencyExplicit ProviderDependencyReason = iota - - // ProviderDependencyImplicit means that there is no explicit "provider" - // block but there is at least one resource that uses this provider. - ProviderDependencyImplicit - - // ProviderDependencyInherited is a special case of - // ProviderDependencyImplicit where a parent module has defined a - // configuration for the provider that has been inherited by at least one - // resource in this module. - ProviderDependencyInherited - - // ProviderDependencyFromState means that this provider is not currently - // referenced by configuration at all, but some existing instances in - // the state still depend on it. - ProviderDependencyFromState -) diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform/moduledeps/doc.go deleted file mode 100644 index 7eff0831..00000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package moduledeps contains types that can be used to describe the -// providers required for all of the modules in a module tree. -// -// It does not itself contain the functionality for populating such -// data structures; that's in Terraform core, since this package intentionally -// does not depend on terraform core to avoid package dependency cycles. -package moduledeps diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/module.go b/vendor/github.com/hashicorp/terraform/moduledeps/module.go deleted file mode 100644 index d6cbaf5c..00000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/module.go +++ /dev/null @@ -1,204 +0,0 @@ -package moduledeps - -import ( - "sort" - "strings" - - "github.com/hashicorp/terraform/plugin/discovery" -) - -// Module represents the dependencies of a single module, as well being -// a node in a tree of such structures representing the dependencies of -// an entire configuration. -type Module struct { - Name string - Providers Providers - Children []*Module -} - -// WalkFunc is a callback type for use with Module.WalkTree -type WalkFunc func(path []string, parent *Module, current *Module) error - -// WalkTree calls the given callback once for the receiver and then -// once for each descendent, in an order such that parents are called -// before their children and siblings are called in the order they -// appear in the Children slice. -// -// When calling the callback, parent will be nil for the first call -// for the receiving module, and then set to the direct parent of -// each module for the subsequent calls. -// -// The path given to the callback is valid only until the callback -// returns, after which it will be mutated and reused. Callbacks must -// therefore copy the path slice if they wish to retain it. -// -// If the given callback returns an error, the walk will be aborted at -// that point and that error returned to the caller. -// -// This function is not thread-safe for concurrent modifications of the -// data structure, so it's the caller's responsibility to arrange for that -// should it be needed. 
-// -// It is safe for a callback to modify the descendents of the "current" -// module, including the ordering of the Children slice itself, but the -// callback MUST NOT modify the parent module. -func (m *Module) WalkTree(cb WalkFunc) error { - return walkModuleTree(make([]string, 0, 1), nil, m, cb) -} - -func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { - path = append(path, current.Name) - err := cb(path, parent, current) - if err != nil { - return err - } - - for _, child := range current.Children { - err := walkModuleTree(path, current, child, cb) - if err != nil { - return err - } - } - return nil -} - -// SortChildren sorts the Children slice into lexicographic order by -// name, in-place. -// -// This is primarily useful prior to calling WalkTree so that the walk -// will proceed in a consistent order. -func (m *Module) SortChildren() { - sort.Sort(sortModules{m.Children}) -} - -// SortDescendents is a convenience wrapper for calling SortChildren on -// the receiver and all of its descendent modules. -func (m *Module) SortDescendents() { - m.WalkTree(func(path []string, parent *Module, current *Module) error { - current.SortChildren() - return nil - }) -} - -type sortModules struct { - modules []*Module -} - -func (s sortModules) Len() int { - return len(s.modules) -} - -func (s sortModules) Less(i, j int) bool { - cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) - return cmp < 0 -} - -func (s sortModules) Swap(i, j int) { - s.modules[i], s.modules[j] = s.modules[j], s.modules[i] -} - -// PluginRequirements produces a PluginRequirements structure that can -// be used with discovery.PluginMetaSet.ConstrainVersions to identify -// suitable plugins to satisfy the module's provider dependencies. -// -// This method only considers the direct requirements of the receiver. -// Use AllPluginRequirements to flatten the dependencies for the -// entire tree of modules. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) PluginRequirements() discovery.PluginRequirements { - ret := make(discovery.PluginRequirements) - for inst, dep := range m.Providers { - // m.Providers is keyed on provider names, such as "aws.foo". - // a PluginRequirements wants keys to be provider *types*, such - // as "aws". If there are multiple aliases for the same - // provider then we will flatten them into a single requirement - // by combining their constraint sets. - pty := inst.Type() - if existing, exists := ret[pty]; exists { - ret[pty].Versions = existing.Versions.Append(dep.Constraints) - } else { - ret[pty] = &discovery.PluginConstraints{ - Versions: dep.Constraints, - } - } - } - return ret -} - -// AllPluginRequirements calls PluginRequirements for the receiver and all -// of its descendents, and merges the result into a single PluginRequirements -// structure that would satisfy all of the modules together. -// -// Requirements returned by this method include only version constraints, -// and apply no particular SHA256 hash constraint. -func (m *Module) AllPluginRequirements() discovery.PluginRequirements { - var ret discovery.PluginRequirements - m.WalkTree(func(path []string, parent *Module, current *Module) error { - ret = ret.Merge(current.PluginRequirements()) - return nil - }) - return ret -} - -// Equal returns true if the receiver is the root of an identical tree -// to the other given Module. 
This is a deep comparison that considers -// the equality of all downstream modules too. -// -// The children are considered to be ordered, so callers may wish to use -// SortDescendents first to normalize the order of the slices of child nodes. -// -// The implementation of this function is not optimized since it is provided -// primarily for use in tests. -func (m *Module) Equal(other *Module) bool { - // take care of nils first - if m == nil && other == nil { - return true - } else if (m == nil && other != nil) || (m != nil && other == nil) { - return false - } - - if m.Name != other.Name { - return false - } - - if len(m.Providers) != len(other.Providers) { - return false - } - if len(m.Children) != len(other.Children) { - return false - } - - // Can't use reflect.DeepEqual on this provider structure because - // the nested Constraints objects contain function pointers that - // never compare as equal. So we'll need to walk it the long way. - for inst, dep := range m.Providers { - if _, exists := other.Providers[inst]; !exists { - return false - } - - if dep.Reason != other.Providers[inst].Reason { - return false - } - - // Constraints are not too easy to compare robustly, so - // we'll just use their string representations as a proxy - // for now. - if dep.Constraints.String() != other.Providers[inst].Constraints.String() { - return false - } - } - - // Above we already checked that we have the same number of children - // in each module, so now we just need to check that they are - // recursively equal. - for i := range m.Children { - if !m.Children[i].Equal(other.Children[i]) { - return false - } - } - - // If we fall out here then they are equal - return true -} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform/moduledeps/provider.go deleted file mode 100644 index 89ceefb2..00000000 --- a/vendor/github.com/hashicorp/terraform/moduledeps/provider.go +++ /dev/null @@ -1,30 +0,0 @@ -package moduledeps - -import ( - "strings" -) - -// ProviderInstance describes a particular provider instance by its full name, -// like "null" or "aws.foo". -type ProviderInstance string - -// Type returns the provider type of this instance. For example, for an instance -// named "aws.foo" the type is "aws". -func (p ProviderInstance) Type() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - t = t[:dotPos] - } - return t -} - -// Alias returns the alias of this provider, if any. An instance named "aws.foo" -// has the alias "foo", while an instance named just "docker" has no alias, -// so the empty string would be returned. -func (p ProviderInstance) Alias() string { - t := string(p) - if dotPos := strings.Index(t, "."); dotPos != -1 { - return t[dotPos+1:] - } - return "" -} diff --git a/vendor/github.com/hashicorp/terraform/plans/action.go b/vendor/github.com/hashicorp/terraform/plans/action.go deleted file mode 100644 index c653b106..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/action.go +++ /dev/null @@ -1,22 +0,0 @@ -package plans - -type Action rune - -const ( - NoOp Action = 0 - Create Action = '+' - Read Action = '←' - Update Action = '~' - DeleteThenCreate Action = '∓' - CreateThenDelete Action = '±' - Delete Action = '-' -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type Action - -// IsReplace returns true if the action is one of the two actions that -// represents replacing an existing object with a new object: -// DeleteThenCreate or CreateThenDelete. 
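// Illustrative sketch, not part of the diff above: the moduledeps.ProviderInstance
// helpers removed here, with hypothetical provider names.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/moduledeps"
)

func main() {
	inst := moduledeps.ProviderInstance("aws.foo")
	fmt.Println(inst.Type())  // "aws"
	fmt.Println(inst.Alias()) // "foo"

	unaliased := moduledeps.ProviderInstance("alks")
	fmt.Println(unaliased.Type())        // "alks"
	fmt.Println(unaliased.Alias() == "") // true: no alias
}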
-func (a Action) IsReplace() bool { - return a == DeleteThenCreate || a == CreateThenDelete -} diff --git a/vendor/github.com/hashicorp/terraform/plans/action_string.go b/vendor/github.com/hashicorp/terraform/plans/action_string.go deleted file mode 100644 index be43ab17..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/action_string.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by "stringer -type Action"; DO NOT EDIT. - -package plans - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[NoOp-0] - _ = x[Create-43] - _ = x[Read-8592] - _ = x[Update-126] - _ = x[DeleteThenCreate-8723] - _ = x[CreateThenDelete-177] - _ = x[Delete-45] -} - -const ( - _Action_name_0 = "NoOp" - _Action_name_1 = "Create" - _Action_name_2 = "Delete" - _Action_name_3 = "Update" - _Action_name_4 = "CreateThenDelete" - _Action_name_5 = "Read" - _Action_name_6 = "DeleteThenCreate" -) - -func (i Action) String() string { - switch { - case i == 0: - return _Action_name_0 - case i == 43: - return _Action_name_1 - case i == 45: - return _Action_name_2 - case i == 126: - return _Action_name_3 - case i == 177: - return _Action_name_4 - case i == 8592: - return _Action_name_5 - case i == 8723: - return _Action_name_6 - default: - return "Action(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes.go b/vendor/github.com/hashicorp/terraform/plans/changes.go deleted file mode 100644 index d7e0dcdb..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/changes.go +++ /dev/null @@ -1,308 +0,0 @@ -package plans - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/zclconf/go-cty/cty" -) - -// Changes describes various actions that Terraform will attempt to take if -// the corresponding plan is applied. -// -// A Changes object can be rendered into a visual diff (by the caller, using -// code in another package) for display to the user. -type Changes struct { - // Resources tracks planned changes to resource instance objects. - Resources []*ResourceInstanceChangeSrc - - // Outputs tracks planned changes output values. - // - // Note that although an in-memory plan contains planned changes for - // outputs throughout the configuration, a plan serialized - // to disk retains only the root outputs because they are - // externally-visible, while other outputs are implementation details and - // can be easily re-calculated during the apply phase. Therefore only root - // module outputs will survive a round-trip through a plan file. - Outputs []*OutputChangeSrc -} - -// NewChanges returns a valid Changes object that describes no changes. -func NewChanges() *Changes { - return &Changes{} -} - -func (c *Changes) Empty() bool { - for _, res := range c.Resources { - if res.Action != NoOp { - return false - } - } - return true -} - -// ResourceInstance returns the planned change for the current object of the -// resource instance of the given address, if any. Returns nil if no change is -// planned. 
-func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed { - return rc - } - } - - return nil -} - -// ResourceInstanceDeposed returns the plan change of a deposed object of -// the resource instance of the given address, if any. Returns nil if no change -// is planned. -func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { - addrStr := addr.String() - for _, rc := range c.Resources { - if rc.Addr.String() == addrStr && rc.DeposedKey == key { - return rc - } - } - - return nil -} - -// OutputValue returns the planned change for the output value with the -// given address, if any. Returns nil if no change is planned. -func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { - addrStr := addr.String() - for _, oc := range c.Outputs { - if oc.Addr.String() == addrStr { - return oc - } - } - - return nil -} - -// SyncWrapper returns a wrapper object around the receiver that can be used -// to make certain changes to the receiver in a concurrency-safe way, as long -// as all callers share the same wrapper object. -func (c *Changes) SyncWrapper() *ChangesSync { - return &ChangesSync{ - changes: c, - } -} - -// ResourceInstanceChange describes a change to a particular resource instance -// object. -type ResourceInstanceChange struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. - Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. - DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. - ProviderAddr addrs.AbsProviderConfig - - // Change is an embedded description of the change. - Change - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. - RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the implied type of the -// corresponding resource type schema for correct operation. 
-func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { - cs, err := rc.Change.Encode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChangeSrc{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - ProviderAddr: rc.ProviderAddr, - ChangeSrc: *cs, - RequiredReplace: rc.RequiredReplace, - Private: rc.Private, - }, err -} - -// Simplify will, where possible, produce a change with a simpler action than -// the receiever given a flag indicating whether the caller is dealing with -// a normal apply or a destroy. This flag deals with the fact that Terraform -// Core uses a specialized graph node type for destroying; only that -// specialized node should set "destroying" to true. -// -// The following table shows the simplification behavior: -// -// Action Destroying? New Action -// --------+-------------+----------- -// Create true NoOp -// Delete false NoOp -// Replace true Delete -// Replace false Create -// -// For any combination not in the above table, the Simplify just returns the -// receiver as-is. -func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { - if destroying { - switch rc.Action { - case Delete: - // We'll fall out and just return rc verbatim, then. - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Delete, - Before: rc.Before, - After: cty.NullVal(rc.Before.Type()), - }, - } - default: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - } - } else { - switch rc.Action { - case Delete: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: NoOp, - Before: rc.Before, - After: rc.Before, - }, - } - case CreateThenDelete, DeleteThenCreate: - return &ResourceInstanceChange{ - Addr: rc.Addr, - DeposedKey: rc.DeposedKey, - Private: rc.Private, - ProviderAddr: rc.ProviderAddr, - Change: Change{ - Action: Create, - Before: cty.NullVal(rc.After.Type()), - After: rc.After, - }, - } - } - } - - // If we fall out here then our change is already simple enough. - return rc -} - -// OutputChange describes a change to an output value. -type OutputChange struct { - // Addr is the absolute address of the output value that the change - // will apply to. - Addr addrs.AbsOutputValue - - // Change is an embedded description of the change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - Change - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. - Sensitive bool -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. -func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { - cs, err := oc.Change.Encode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChangeSrc{ - Addr: oc.Addr, - ChangeSrc: *cs, - Sensitive: oc.Sensitive, - }, err -} - -// Change describes a single change with a given action. 
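To make the `Simplify` table above concrete, a small hedged example (using only the types shown in this removed file) of how a replace action collapses on the destroy path versus a normal apply:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/plans"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	rc := &plans.ResourceInstanceChange{
		Change: plans.Change{
			Action: plans.DeleteThenCreate,
			Before: cty.StringVal("old"),
			After:  cty.StringVal("new"),
		},
	}

	// Per the table: Replace simplifies to Delete when destroying,
	// and to Create during a normal apply.
	fmt.Println(rc.Simplify(true).Action)  // Delete
	fmt.Println(rc.Simplify(false).Action) // Create
}
```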
-type Change struct { - // Action defines what kind of change is being made. - Action Action - - // Interpretation of Before and After depend on Action: - // - // NoOp Before and After are the same, unchanged value - // Create Before is nil, and After is the expected value after create. - // Read Before is any prior value (nil if no prior), and After is the - // value that was or will be read. - // Update Before is the value prior to update, and After is the expected - // value after update. - // Replace As with Update. - // Delete Before is the value prior to delete, and After is always nil. - // - // Unknown values may appear anywhere within the Before and After values, - // either as the values themselves or as nested elements within known - // collections/structures. - Before, After cty.Value -} - -// Encode produces a variant of the reciever that has its change values -// serialized so it can be written to a plan file. Pass the type constraint -// that the values are expected to conform to; to properly decode the values -// later an identical type constraint must be provided at that time. -// -// Where a Change is embedded in some other struct, it's generally better -// to call the corresponding Encode method of that struct rather than working -// directly with its embedded Change. -func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { - beforeDV, err := NewDynamicValue(c.Before, ty) - if err != nil { - return nil, err - } - afterDV, err := NewDynamicValue(c.After, ty) - if err != nil { - return nil, err - } - - return &ChangeSrc{ - Action: c.Action, - Before: beforeDV, - After: afterDV, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_src.go b/vendor/github.com/hashicorp/terraform/plans/changes_src.go deleted file mode 100644 index 90153ea7..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/changes_src.go +++ /dev/null @@ -1,190 +0,0 @@ -package plans - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/zclconf/go-cty/cty" -) - -// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. -// Pass the associated resource type's schema type to method Decode to -// obtain a ResourceInstancChange. -type ResourceInstanceChangeSrc struct { - // Addr is the absolute address of the resource instance that the change - // will apply to. - Addr addrs.AbsResourceInstance - - // DeposedKey is the identifier for a deposed object associated with the - // given instance, or states.NotDeposed if this change applies to the - // current object. - // - // A Replace change for a resource with create_before_destroy set will - // create a new DeposedKey temporarily during replacement. In that case, - // DeposedKey in the plan is always states.NotDeposed, representing that - // the current object is being replaced with the deposed. - DeposedKey states.DeposedKey - - // Provider is the address of the provider configuration that was used - // to plan this change, and thus the configuration that must also be - // used to apply it. - ProviderAddr addrs.AbsProviderConfig - - // ChangeSrc is an embedded description of the not-yet-decoded change. - ChangeSrc - - // RequiredReplace is a set of paths that caused the change action to be - // Replace rather than Update. Always nil if the change action is not - // Replace. - // - // This is retained only for UI-plan-rendering purposes and so it does not - // currently survive a round-trip through a saved plan file. 
- RequiredReplace cty.PathSet - - // Private allows a provider to stash any extra data that is opaque to - // Terraform that relates to this change. Terraform will save this - // byte-for-byte and return it to the provider in the apply call. - Private []byte -} - -// Decode unmarshals the raw representation of the instance object being -// changed. Pass the implied type of the corresponding resource type schema -// for correct operation. -func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { - change, err := rcs.ChangeSrc.Decode(ty) - if err != nil { - return nil, err - } - return &ResourceInstanceChange{ - Addr: rcs.Addr, - DeposedKey: rcs.DeposedKey, - ProviderAddr: rcs.ProviderAddr, - Change: *change, - RequiredReplace: rcs.RequiredReplace, - Private: rcs.Private, - }, nil -} - -// DeepCopy creates a copy of the receiver where any pointers to nested mutable -// values are also copied, thus ensuring that future mutations of the receiver -// will not affect the copy. -// -// Some types used within a resource change are immutable by convention even -// though the Go language allows them to be mutated, such as the types from -// the addrs package. These are _not_ copied by this method, under the -// assumption that callers will behave themselves. -func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { - if rcs == nil { - return nil - } - ret := *rcs - - ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) - - if len(ret.Private) != 0 { - private := make([]byte, len(ret.Private)) - copy(private, ret.Private) - ret.Private = private - } - - ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() - ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() - - return &ret -} - -// OutputChangeSrc describes a change to an output value. -type OutputChangeSrc struct { - // Addr is the absolute address of the output value that the change - // will apply to. - Addr addrs.AbsOutputValue - - // ChangeSrc is an embedded description of the not-yet-decoded change. - // - // For output value changes, the type constraint for the DynamicValue - // instances is always cty.DynamicPseudoType. - ChangeSrc - - // Sensitive, if true, indicates that either the old or new value in the - // change is sensitive and so a rendered version of the plan in the UI - // should elide the actual values while still indicating the action of the - // change. - Sensitive bool -} - -// Decode unmarshals the raw representation of the output value being -// changed. -func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { - change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) - if err != nil { - return nil, err - } - return &OutputChange{ - Addr: ocs.Addr, - Change: *change, - Sensitive: ocs.Sensitive, - }, nil -} - -// DeepCopy creates a copy of the receiver where any pointers to nested mutable -// values are also copied, thus ensuring that future mutations of the receiver -// will not affect the copy. -// -// Some types used within a resource change are immutable by convention even -// though the Go language allows them to be mutated, such as the types from -// the addrs package. These are _not_ copied by this method, under the -// assumption that callers will behave themselves. -func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { - if ocs == nil { - return nil - } - ret := *ocs - - ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() - ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() - - return &ret -} - -// ChangeSrc is a not-yet-decoded Change. 
-type ChangeSrc struct { - // Action defines what kind of change is being made. - Action Action - - // Before and After correspond to the fields of the same name in Change, - // but have not yet been decoded from the serialized value used for - // storage. - Before, After DynamicValue -} - -// Decode unmarshals the raw representations of the before and after values -// to produce a Change object. Pass the type constraint that the result must -// conform to. -// -// Where a ChangeSrc is embedded in some other struct, it's generally better -// to call the corresponding Decode method of that struct rather than working -// directly with its embedded Change. -func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { - var err error - before := cty.NullVal(ty) - after := cty.NullVal(ty) - - if len(cs.Before) > 0 { - before, err = cs.Before.Decode(ty) - if err != nil { - return nil, fmt.Errorf("error decoding 'before' value: %s", err) - } - } - if len(cs.After) > 0 { - after, err = cs.After.Decode(ty) - if err != nil { - return nil, fmt.Errorf("error decoding 'after' value: %s", err) - } - } - return &Change{ - Action: cs.Action, - Before: before, - After: after, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_state.go b/vendor/github.com/hashicorp/terraform/plans/changes_state.go deleted file mode 100644 index 543e6c2b..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/changes_state.go +++ /dev/null @@ -1,15 +0,0 @@ -package plans - -import ( - "github.com/hashicorp/terraform/states" -) - -// PlannedState merges the set of changes described by the receiver into the -// given prior state to produce the planned result state. -// -// The result is an approximation of the state as it would exist after -// applying these changes, omitting any values that cannot be determined until -// the changes are actually applied. -func (c *Changes) PlannedState(prior *states.State) (*states.State, error) { - panic("Changes.PlannedState not yet implemented") -} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go deleted file mode 100644 index 6b4ff98f..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go +++ /dev/null @@ -1,144 +0,0 @@ -package plans - -import ( - "fmt" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" -) - -// ChangesSync is a wrapper around a Changes that provides a concurrency-safe -// interface to insert new changes and retrieve copies of existing changes. -// -// Each ChangesSync is independent of all others, so all concurrent writers -// to a particular Changes must share a single ChangesSync. Behavior is -// undefined if any other caller makes changes to the underlying Changes -// object or its nested objects concurrently with any of the methods of a -// particular ChangesSync. -type ChangesSync struct { - lock sync.Mutex - changes *Changes -} - -// AppendResourceInstanceChange records the given resource instance change in -// the set of planned resource changes. -// -// The caller must ensure that there are no concurrent writes to the given -// change while this method is running, but it is safe to resume mutating -// it after this method returns without affecting the saved change. 
-func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { - if cs == nil { - panic("AppendResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - s := changeSrc.DeepCopy() - cs.changes.Resources = append(cs.changes.Resources, s) -} - -// GetResourceInstanceChange searches the set of resource instance changes for -// one matching the given address and generation, returning it if it exists. -// -// If no such change exists, nil is returned. -// -// The returned object is a deep copy of the change recorded in the plan, so -// callers may mutate it although it's generally better (less confusing) to -// treat planned changes as immutable after they've been initially constructed. -func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { - if cs == nil { - panic("GetResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - if gen == states.CurrentGen { - return cs.changes.ResourceInstance(addr).DeepCopy() - } - if dk, ok := gen.(states.DeposedKey); ok { - return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() - } - panic(fmt.Sprintf("unsupported generation value %#v", gen)) -} - -// RemoveResourceInstanceChange searches the set of resource instance changes -// for one matching the given address and generation, and removes it from the -// set if it exists. -func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) { - if cs == nil { - panic("RemoveResourceInstanceChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - dk := states.NotDeposed - if realDK, ok := gen.(states.DeposedKey); ok { - dk = realDK - } - - addrStr := addr.String() - for i, r := range cs.changes.Resources { - if r.Addr.String() != addrStr || r.DeposedKey != dk { - continue - } - copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:]) - cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1] - return - } -} - -// AppendOutputChange records the given output value change in the set of -// planned value changes. -// -// The caller must ensure that there are no concurrent writes to the given -// change while this method is running, but it is safe to resume mutating -// it after this method returns without affecting the saved change. -func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) { - if cs == nil { - panic("AppendOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - s := changeSrc.DeepCopy() - cs.changes.Outputs = append(cs.changes.Outputs, s) -} - -// GetOutputChange searches the set of output value changes for one matching -// the given address, returning it if it exists. -// -// If no such change exists, nil is returned. -// -// The returned object is a deep copy of the change recorded in the plan, so -// callers may mutate it although it's generally better (less confusing) to -// treat planned changes as immutable after they've been initially constructed. -func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc { - if cs == nil { - panic("GetOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - return cs.changes.OutputValue(addr) -} - -// RemoveOutputChange searches the set of output value changes for one matching -// the given address, and removes it from the set if it exists. 
-func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { - if cs == nil { - panic("RemoveOutputChange on nil ChangesSync") - } - cs.lock.Lock() - defer cs.lock.Unlock() - - addrStr := addr.String() - for i, o := range cs.changes.Outputs { - if o.Addr.String() != addrStr { - continue - } - copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) - cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1] - return - } -} diff --git a/vendor/github.com/hashicorp/terraform/plans/doc.go b/vendor/github.com/hashicorp/terraform/plans/doc.go deleted file mode 100644 index 01ca3892..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package plans contains the types that are used to represent Terraform plans. -// -// A plan describes a set of changes that Terraform will make to update remote -// objects to match with changes to the configuration. -package plans diff --git a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go deleted file mode 100644 index 51fbb24c..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go +++ /dev/null @@ -1,96 +0,0 @@ -package plans - -import ( - "github.com/zclconf/go-cty/cty" - ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" -) - -// DynamicValue is the representation in the plan of a value whose type cannot -// be determined at compile time, such as because it comes from a schema -// defined in a plugin. -// -// This type is used as an indirection so that the overall plan structure can -// be decoded without schema available, and then the dynamic values accessed -// at a later time once the appropriate schema has been determined. -// -// Internally, DynamicValue is a serialized version of a cty.Value created -// against a particular type constraint. Callers should not access directly -// the serialized form, whose format may change in future. Values of this -// type must always be created by calling NewDynamicValue. -// -// The zero value of DynamicValue is nil, and represents the absense of a -// value within the Go type system. This is distinct from a cty.NullVal -// result, which represents the absense of a value within the cty type system. -type DynamicValue []byte - -// NewDynamicValue creates a DynamicValue by serializing the given value -// against the given type constraint. The value must conform to the type -// constraint, or the result is undefined. -// -// If the value to be encoded has no predefined schema (for example, for -// module output values and input variables), set the type constraint to -// cty.DynamicPseudoType in order to save type information as part of the -// value, and then also pass cty.DynamicPseudoType to method Decode to recover -// the original value. -// -// cty.NilVal can be used to represent the absense of a value, but callers -// must be careful to distinguish values that are absent at the Go layer -// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal -// results). -func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) { - // If we're given cty.NilVal (the zero value of cty.Value, which is - // distinct from a typed null value created by cty.NullVal) then we'll - // assume the caller is trying to represent the _absense_ of a value, - // and so we'll return a nil DynamicValue. - if val == cty.NilVal { - return DynamicValue(nil), nil - } - - // Currently our internal encoding is msgpack, via ctymsgpack. 
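A minimal round-trip sketch of the `DynamicValue` encoding described above (msgpack via ctymsgpack), assuming the removed `plans` package is importable; `cty.DynamicPseudoType` is used so the type information is stored with the value and can be supplied again at decode time:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/plans"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	})

	// Encoding against cty.DynamicPseudoType serializes the runtime type
	// alongside the value, so the same constraint decodes it later.
	dv, err := plans.NewDynamicValue(val, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}

	got, err := dv.Decode(cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(val)) // true
}
```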
- buf, err := ctymsgpack.Marshal(val, ty) - if err != nil { - return nil, err - } - - return DynamicValue(buf), nil -} - -// Decode retrieves the effective value from the receiever by interpreting the -// serialized form against the given type constraint. For correct results, -// the type constraint must match (or be consistent with) the one that was -// used to create the receiver. -// -// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and -// instead represents the absense of a value. -func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) { - if v == nil { - return cty.NilVal, nil - } - - return ctymsgpack.Unmarshal([]byte(v), ty) -} - -// ImpliedType returns the type implied by the serialized structure of the -// receiving value. -// -// This will not necessarily be exactly the type that was given when the -// value was encoded, and in particular must not be used for values that -// were encoded with their static type given as cty.DynamicPseudoType. -// It is however safe to use this method for values that were encoded using -// their runtime type as the conforming type, with the result being -// semantically equivalent but with all lists and sets represented as tuples, -// and maps as objects, due to ambiguities of the serialization. -func (v DynamicValue) ImpliedType() (cty.Type, error) { - return ctymsgpack.ImpliedType([]byte(v)) -} - -// Copy produces a copy of the receiver with a distinct backing array. -func (v DynamicValue) Copy() DynamicValue { - if v == nil { - return nil - } - - ret := make(DynamicValue, len(v)) - copy(ret, v) - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go deleted file mode 100644 index 18a7e99a..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go +++ /dev/null @@ -1,18 +0,0 @@ -package objchange - -import ( - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// AllAttributesNull constructs a non-null cty.Value of the object type implied -// by the given schema that has all of its leaf attributes set to null and all -// of its nested block collections set to zero-length. -// -// This simulates what would result from decoding an empty configuration block -// with the given schema, except that it does not produce errors -func AllAttributesNull(schema *configschema.Block) cty.Value { - // "All attributes null" happens to be the definition of EmptyValue for - // a Block, so we can just delegate to that. - return schema.EmptyValue() -} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go deleted file mode 100644 index d85086c9..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go +++ /dev/null @@ -1,447 +0,0 @@ -package objchange - -import ( - "fmt" - "strconv" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// AssertObjectCompatible checks whether the given "actual" value is a valid -// completion of the possibly-partially-unknown "planned" value. -// -// This means that any known leaf value in "planned" must be equal to the -// corresponding value in "actual", and various other similar constraints. -// -// Any inconsistencies are reported by returning a non-zero number of errors. 
-// These errors are usually (but not necessarily) cty.PathError values -// referring to a particular nested value within the "actual" value. -// -// The two values must have types that conform to the given schema's implied -// type, or this function will panic. -func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { - return assertObjectCompatible(schema, planned, actual, nil) -} - -func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { - var errs []error - if planned.IsNull() && !actual.IsNull() { - errs = append(errs, path.NewErrorf("was absent, but now present")) - return errs - } - if actual.IsNull() && !planned.IsNull() { - errs = append(errs, path.NewErrorf("was present, but now absent")) - return errs - } - if planned.IsNull() { - // No further checks possible if both values are null - return errs - } - - for name, attrS := range schema.Attributes { - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - - path := append(path, cty.GetAttrStep{Name: name}) - moreErrs := assertValueCompatible(plannedV, actualV, path) - if attrS.Sensitive { - if len(moreErrs) > 0 { - // Use a vague placeholder message instead, to avoid disclosing - // sensitive information. - errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) - } - } else { - errs = append(errs, moreErrs...) - } - } - for name, blockS := range schema.BlockTypes { - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - - // As a special case, if there were any blocks whose leaf attributes - // are all unknown then we assume (possibly incorrectly) that the - // HCL dynamic block extension is in use with an unknown for_each - // argument, and so we will do looser validation here that allows - // for those blocks to have expanded into a different number of blocks - // if the for_each value is now known. - maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false) - - path := append(path, cty.GetAttrStep{Name: name}) - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - // If an unknown block placeholder was present then the placeholder - // may have expanded out into zero blocks, which is okay. - if maybeUnknownBlocks && actualV.IsNull() { - continue - } - moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) - errs = append(errs, moreErrs...) - case configschema.NestingList: - // A NestingList might either be a list or a tuple, depending on - // whether there are dynamically-typed attributes inside. However, - // both support a similar-enough API that we can treat them the - // same for our purposes here. - if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - - if maybeUnknownBlocks { - // When unknown blocks are present the final blocks may be - // at different indices than the planned blocks, so unfortunately - // we can't do our usual checks in this case without generating - // false negatives. 
- continue - } - - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL != actualL { - errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - if !actualV.HasIndex(idx).True() { - continue - } - actualEV := actualV.Index(idx) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so both values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - actualAtys := actualV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := actualAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q has vanished", k)) - continue - } - - plannedEV := plannedV.GetAttr(k) - actualEV := actualV.GetAttr(k) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k})) - errs = append(errs, moreErrs...) - } - if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan - for k := range actualAtys { - if _, ok := plannedAtys[k]; !ok { - errs = append(errs, path.NewErrorf("new block key %q has appeared", k)) - continue - } - } - } - } else { - if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were persent in the plan - errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - if !actualV.HasIndex(idx).True() { - continue - } - actualEV := actualV.Index(idx) - moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) - errs = append(errs, moreErrs...) - } - } - case configschema.NestingSet: - if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { - continue - } - - setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool { - errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV})) - return len(errs) == 0 - }) - errs = append(errs, setErrs...) - - if maybeUnknownBlocks { - // When unknown blocks are present the final number of blocks - // may be different, either because the unknown set values - // become equal and are collapsed, or the count is unknown due - // a dynamic block. Unfortunately this means we can't do our - // usual checks in this case without generating false - // negatives. - continue - } - - // There can be fewer elements in a set after its elements are all - // known (values that turn out to be equal will coalesce) but the - // number of elements must never get larger. 
- plannedL := plannedV.LengthInt() - actualL := actualV.LengthInt() - if plannedL < actualL { - errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL)) - } - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - return errs -} - -func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error { - // NOTE: We don't normally use the GoString rendering of cty.Value in - // user-facing error messages as a rule, but we make an exception - // for this function because we expect the user to pass this message on - // verbatim to the provider development team and so more detail is better. - - var errs []error - if planned.Type() == cty.DynamicPseudoType { - // Anything goes, then - return errs - } - if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 { - errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type()))) - // If the types don't match then we can't do any other comparisons, - // so we bail early. - return errs - } - - if !planned.IsKnown() { - // We didn't know what were going to end up with during plan, so - // anything goes during apply. - return errs - } - - if actual.IsNull() { - if planned.IsNull() { - return nil - } - errs = append(errs, path.NewErrorf("was %#v, but now null", planned)) - return errs - } - if planned.IsNull() { - errs = append(errs, path.NewErrorf("was null, but now %#v", actual)) - return errs - } - - ty := planned.Type() - switch { - - case !actual.IsKnown(): - errs = append(errs, path.NewErrorf("was known, but now unknown")) - - case ty.IsPrimitiveType(): - if !actual.Equals(planned).True() { - errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual)) - } - - case ty.IsListType() || ty.IsMapType() || ty.IsTupleType(): - for it := planned.ElementIterator(); it.Next(); { - k, plannedV := it.Element() - if !actual.HasIndex(k).True() { - errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k))) - continue - } - - actualV := actual.Index(k) - moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k})) - errs = append(errs, moreErrs...) - } - - for it := actual.ElementIterator(); it.Next(); { - k, _ := it.Element() - if !planned.HasIndex(k).True() { - errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k))) - } - } - - case ty.IsObjectType(): - atys := ty.AttributeTypes() - for name := range atys { - // Because we already tested that the two values have the same type, - // we can assume that the same attributes are present in both and - // focus just on testing their values. - plannedV := planned.GetAttr(name) - actualV := actual.GetAttr(name) - moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name})) - errs = append(errs, moreErrs...) - } - - case ty.IsSetType(): - // We can't really do anything useful for sets here because changing - // an unknown element to known changes the identity of the element, and - // so we can't correlate them properly. However, we will at least check - // to ensure that the number of elements is consistent, along with - // the general type-match checks we ran earlier in this function. 
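An illustrative sketch of `AssertObjectCompatible` on a trivial schema, assuming the `objchange` and `configschema` packages removed in this diff: an unknown planned value may resolve to anything at apply time, while a known planned value must match the applied result exactly:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/plans/objchange"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"id": {Type: cty.String, Computed: true},
		},
	}

	planned := cty.ObjectVal(map[string]cty.Value{"id": cty.UnknownVal(cty.String)})
	applied := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-abc123")})
	// No errors: an unknown planned value may become any known value.
	fmt.Println(objchange.AssertObjectCompatible(schema, planned, applied))

	planned2 := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-abc123")})
	applied2 := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-different")})
	// One error: the known planned value for .id changed during apply.
	fmt.Println(objchange.AssertObjectCompatible(schema, planned2, applied2))
}
```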
- if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() { - - setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool { - errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV})) - return len(errs) == 0 - }) - errs = append(errs, setErrs...) - - // There can be fewer elements in a set after its elements are all - // known (values that turn out to be equal will coalesce) but the - // number of elements must never get larger. - - plannedL := planned.LengthInt() - actualL := actual.LengthInt() - if plannedL < actualL { - errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL)) - } - } - } - - return errs -} - -func indexStrForErrors(v cty.Value) string { - switch v.Type() { - case cty.Number: - return v.AsBigFloat().Text('f', -1) - case cty.String: - return strconv.Quote(v.AsString()) - default: - // Should be impossible, since no other index types are allowed! - return fmt.Sprintf("%#v", v) - } -} - -// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the -// HCL dynamic block extension behaves when it's asked to expand a block whose -// for_each argument is unknown. In such cases, it generates a single placeholder -// block with all leaf attribute values unknown, and once the for_each -// expression becomes known the placeholder may be replaced with any number -// of blocks, so object compatibility checks would need to be more liberal. -// -// Set "nested" if testing a block that is nested inside a candidate block -// placeholder; this changes the interpretation of there being no blocks of -// a type to allow for there being zero nested blocks. -func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool { - switch blockS.Nesting { - case configschema.NestingSingle, configschema.NestingGroup: - if nested && v.IsNull() { - return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder - } - return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block) - default: - // These situations should be impossible for correct providers, but - // we permit the legacy SDK to produce some incorrect outcomes - // for compatibility with its existing logic, and so we must be - // tolerant here. - if !v.IsKnown() { - return true - } - if v.IsNull() { - return false // treated as if the list were empty, so we would see zero iterations below - } - - // For all other nesting modes, our value should be something iterable. - for it := v.ElementIterator(); it.Next(); { - _, ev := it.Element() - if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) { - return true - } - } - - // Our default changes depending on whether we're testing the candidate - // block itself or something nested inside of it: zero blocks of a type - // can never contain a dynamic block placeholder, but a dynamic block - // placeholder might contain zero blocks of one of its own nested block - // types, if none were set in the config at all. 
- return nested - } -} - -func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool { - if v.IsNull() { - return false // null value can never be a placeholder element - } - if !v.IsKnown() { - return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs - } - for name := range schema.Attributes { - av := v.GetAttr(name) - - // Unknown block placeholders contain only unknown or null attribute - // values, depending on whether or not a particular attribute was set - // explicitly inside the content block. Note that this is imprecise: - // non-placeholders can also match this, so this function can generate - // false positives. - if av.IsKnown() && !av.IsNull() { - return false - } - } - for name, blockS := range schema.BlockTypes { - if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) { - return false - } - } - return true -} - -// assertSetValuesCompatible checks that each of the elements in a can -// be correlated with at least one equivalent element in b and vice-versa, -// using the given correlation function. -// -// This allows the number of elements in the sets to change as long as all -// elements in both sets can be correlated, making this function safe to use -// with sets that may contain unknown values as long as the unknown case is -// addressed in some reasonable way in the callback function. -// -// The callback always recieves values from set a as its first argument and -// values from set b in its second argument, so it is safe to use with -// non-commutative functions. -// -// As with assertValueCompatible, we assume that the target audience of error -// messages here is a provider developer (via a bug report from a user) and so -// we intentionally violate our usual rule of keeping cty implementation -// details out of error messages. -func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error { - a := planned - b := actual - - // Our methodology here is a little tricky, to deal with the fact that - // it's impossible to directly correlate two non-equal set elements because - // they don't have identities separate from their values. - // The approach is to count the number of equivalent elements each element - // of a has in b and vice-versa, and then return true only if each element - // in both sets has at least one equivalent. - as := a.AsValueSlice() - bs := b.AsValueSlice() - aeqs := make([]bool, len(as)) - beqs := make([]bool, len(bs)) - for ai, av := range as { - for bi, bv := range bs { - if f(av, bv) { - aeqs[ai] = true - beqs[bi] = true - } - } - } - - var errs []error - for i, eq := range aeqs { - if !eq { - errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i])) - } - } - if len(errs) > 0 { - // Exit early since otherwise we're likely to generate duplicate - // error messages from the other perspective in the subsequent loop. 
- return errs - } - for i, eq := range beqs { - if !eq { - errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i])) - } - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go deleted file mode 100644 index 2c18a010..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package objchange deals with the business logic of taking a prior state -// value and a config value and producing a proposed new merged value, along -// with other related rules in this domain. -package objchange diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go deleted file mode 100644 index cbfefddd..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go +++ /dev/null @@ -1,104 +0,0 @@ -package objchange - -import ( - "github.com/zclconf/go-cty/cty" -) - -// LongestCommonSubsequence finds a sequence of values that are common to both -// x and y, with the same relative ordering as in both collections. This result -// is useful as a first step towards computing a diff showing added/removed -// elements in a sequence. -// -// The approached used here is a "naive" one, assuming that both xs and ys will -// generally be small in most reasonable Terraform configurations. For larger -// lists the time/space usage may be sub-optimal. -// -// A pair of lists may have multiple longest common subsequences. In that -// case, the one selected by this function is undefined. -func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value { - if len(xs) == 0 || len(ys) == 0 { - return make([]cty.Value, 0) - } - - c := make([]int, len(xs)*len(ys)) - eqs := make([]bool, len(xs)*len(ys)) - w := len(xs) - - for y := 0; y < len(ys); y++ { - for x := 0; x < len(xs); x++ { - eqV := xs[x].Equals(ys[y]) - eq := false - if eqV.IsKnown() && eqV.True() { - eq = true - eqs[(w*y)+x] = true // equality tests can be expensive, so cache it - } - if eq { - // Sequence gets one longer than for the cell at top left, - // since we'd append a new item to the sequence here. - if x == 0 || y == 0 { - c[(w*y)+x] = 1 - } else { - c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1 - } - } else { - // We follow the longest of the sequence above and the sequence - // to the left of us in the matrix. - l := 0 - u := 0 - if x > 0 { - l = c[(w*y)+(x-1)] - } - if y > 0 { - u = c[(w*(y-1))+x] - } - if l > u { - c[(w*y)+x] = l - } else { - c[(w*y)+x] = u - } - } - } - } - - // The bottom right cell tells us how long our longest sequence will be - seq := make([]cty.Value, c[len(c)-1]) - - // Now we will walk back from the bottom right cell, finding again all - // of the equal pairs to construct our sequence. - x := len(xs) - 1 - y := len(ys) - 1 - i := len(seq) - 1 - - for x > -1 && y > -1 { - if eqs[(w*y)+x] { - // Add the value to our result list and then walk diagonally - // up and to the left. - seq[i] = xs[x] - x-- - y-- - i-- - } else { - // Take the path with the greatest sequence length in the matrix. 
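A quick usage sketch for `LongestCommonSubsequence` (illustrative only, assuming the removed `objchange` package): the result preserves the relative order shared by both inputs, which is the seed for rendering an added/removed diff of a sequence:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/plans/objchange"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	xs := []cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), cty.StringVal("d")}
	ys := []cty.Value{cty.StringVal("b"), cty.StringVal("x"), cty.StringVal("c"), cty.StringVal("d")}

	// Elements common to both slices, in their shared relative order.
	for _, v := range objchange.LongestCommonSubsequence(xs, ys) {
		fmt.Println(v.AsString()) // b, c, d
	}
}
```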
- l := 0 - u := 0 - if x > 0 { - l = c[(w*y)+(x-1)] - } - if y > 0 { - u = c[(w*(y-1))+x] - } - if l > u { - x-- - } else { - y-- - } - } - } - - if i > -1 { - // should never happen if the matrix was constructed properly - panic("not enough elements in sequence") - } - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go deleted file mode 100644 index 5a8af148..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go +++ /dev/null @@ -1,390 +0,0 @@ -package objchange - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// ProposedNewObject constructs a proposed new object value by combining the -// computed attribute values from "prior" with the configured attribute values -// from "config". -// -// Both value must conform to the given schema's implied type, or this function -// will panic. -// -// The prior value must be wholly known, but the config value may be unknown -// or have nested unknown values. -// -// The merging of the two objects includes the attributes of any nested blocks, -// which will be correlated in a manner appropriate for their nesting mode. -// Note in particular that the correlation for blocks backed by sets is a -// heuristic based on matching non-computed attribute values and so it may -// produce strange results with more "extreme" cases, such as a nested set -// block where _all_ attributes are computed. -func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { - // If the config and prior are both null, return early here before - // populating the prior block. The prevents non-null blocks from appearing - // the proposed state value. - if config.IsNull() && prior.IsNull() { - return prior - } - - if prior.IsNull() { - // In this case, we will construct a synthetic prior value that is - // similar to the result of decoding an empty configuration block, - // which simplifies our handling of the top-level attributes/blocks - // below by giving us one non-null level of object to pull values from. - prior = AllAttributesNull(schema) - } - return proposedNewObject(schema, prior, config) -} - -// PlannedDataResourceObject is similar to ProposedNewObject but tailored for -// planning data resources in particular. Specifically, it replaces the values -// of any Computed attributes not set in the configuration with an unknown -// value, which serves as a placeholder for a value to be filled in by the -// provider when the data resource is finally read. -// -// Data resources are different because the planning of them is handled -// entirely within Terraform Core and not subject to customization by the -// provider. This function is, in effect, producing an equivalent result to -// passing the ProposedNewObject result into a provider's PlanResourceChange -// function, assuming a fixed implementation of PlanResourceChange that just -// fills in unknown values as needed. -func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value { - // Our trick here is to run the ProposedNewObject logic with an - // entirely-unknown prior value. Because of cty's unknown short-circuit - // behavior, any operation on prior returns another unknown, and so - // unknown values propagate into all of the parts of the resulting value - // that would normally be filled in by preserving the prior state. 
- prior := cty.UnknownVal(schema.ImpliedType()) - return proposedNewObject(schema, prior, config) -} - -func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { - if config.IsNull() || !config.IsKnown() { - // This is a weird situation, but we'll allow it anyway to free - // callers from needing to specifically check for these cases. - return prior - } - if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) { - panic("ProposedNewObject only supports object-typed values") - } - - // From this point onwards, we can assume that both values are non-null - // object types, and that the config value itself is known (though it - // may contain nested values that are unknown.) - - newAttrs := map[string]cty.Value{} - for name, attr := range schema.Attributes { - priorV := prior.GetAttr(name) - configV := config.GetAttr(name) - var newV cty.Value - switch { - case attr.Computed && attr.Optional: - // This is the trickiest scenario: we want to keep the prior value - // if the config isn't overriding it. Note that due to some - // ambiguity here, setting an optional+computed attribute from - // config and then later switching the config to null in a - // subsequent change causes the initial config value to be "sticky" - // unless the provider specifically overrides it during its own - // plan customization step. - if configV.IsNull() { - newV = priorV - } else { - newV = configV - } - case attr.Computed: - // configV will always be null in this case, by definition. - // priorV may also be null, but that's okay. - newV = priorV - default: - // For non-computed attributes, we always take the config value, - // even if it is null. If it's _required_ then null values - // should've been caught during an earlier validation step, and - // so we don't really care about that here. - newV = configV - } - newAttrs[name] = newV - } - - // Merging nested blocks is a little more complex, since we need to - // correlate blocks between both objects and then recursively propose - // a new object for each. The correlation logic depends on the nesting - // mode for each block type. - for name, blockType := range schema.BlockTypes { - priorV := prior.GetAttr(name) - configV := config.GetAttr(name) - var newV cty.Value - switch blockType.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - newV = ProposedNewObject(&blockType.Block, priorV, configV) - - case configschema.NestingList: - // Nested blocks are correlated by index. - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make([]cty.Value, 0, configVLen) - for it := configV.ElementIterator(); it.Next(); { - idx, configEV := it.Element() - if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals = append(newVals, configEV) - continue - } - priorEV := priorV.Index(idx) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals = append(newVals, newEV) - } - // Despite the name, a NestingList might also be a tuple, if - // its nested schema contains dynamically-typed attributes. - if configV.Type().IsTupleType() { - newV = cty.TupleVal(newVals) - } else { - newV = cty.ListVal(newVals) - } - } else { - // Despite the name, a NestingList might also be a tuple, if - // its nested schema contains dynamically-typed attributes. 
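To illustrate the attribute-merging rules described above (config wins for plain attributes, the prior value is kept for computed ones, and optional+computed values are "sticky" when the config is null), a minimal sketch assuming the removed packages are importable:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/plans/objchange"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"ami": {Type: cty.String, Required: true},
			"id":  {Type: cty.String, Computed: true},
			"tag": {Type: cty.String, Optional: true, Computed: true},
		},
	}

	prior := cty.ObjectVal(map[string]cty.Value{
		"ami": cty.StringVal("ami-123"),
		"id":  cty.StringVal("i-abc"),
		"tag": cty.StringVal("kept"),
	})
	config := cty.ObjectVal(map[string]cty.Value{
		"ami": cty.StringVal("ami-456"), // non-computed: config value wins
		"id":  cty.NullVal(cty.String),  // computed: prior value is preserved
		"tag": cty.NullVal(cty.String),  // optional+computed, null in config: prior is "sticky"
	})

	proposed := objchange.ProposedNewObject(schema, prior, config)
	fmt.Println(proposed.GetAttr("ami")) // "ami-456"
	fmt.Println(proposed.GetAttr("id"))  // "i-abc"
	fmt.Println(proposed.GetAttr("tag")) // "kept"
}
```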
- if configV.Type().IsTupleType() { - newV = cty.EmptyTupleVal - } else { - newV = cty.ListValEmpty(blockType.ImpliedType()) - } - } - - case configschema.NestingMap: - // Despite the name, a NestingMap may produce either a map or - // object value, depending on whether the nested schema contains - // dynamically-typed attributes. - if configV.Type().IsObjectType() { - // Nested blocks are correlated by key. - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make(map[string]cty.Value, configVLen) - atys := configV.Type().AttributeTypes() - for name := range atys { - configEV := configV.GetAttr(name) - if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals[name] = configEV - continue - } - priorEV := priorV.GetAttr(name) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals[name] = newEV - } - // Although we call the nesting mode "map", we actually use - // object values so that elements might have different types - // in case of dynamically-typed attributes. - newV = cty.ObjectVal(newVals) - } else { - newV = cty.EmptyObjectVal - } - } else { - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - newVals := make(map[string]cty.Value, configVLen) - for it := configV.ElementIterator(); it.Next(); { - idx, configEV := it.Element() - k := idx.AsString() - if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { - // If there is no corresponding prior element then - // we just take the config value as-is. - newVals[k] = configEV - continue - } - priorEV := priorV.Index(idx) - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals[k] = newEV - } - newV = cty.MapVal(newVals) - } else { - newV = cty.MapValEmpty(blockType.ImpliedType()) - } - } - - case configschema.NestingSet: - if !configV.Type().IsSetType() { - panic("configschema.NestingSet value is not a set as expected") - } - - // Nested blocks are correlated by comparing the element values - // after eliminating all of the computed attributes. In practice, - // this means that any config change produces an entirely new - // nested object, and we only propagate prior computed values - // if the non-computed attribute values are identical. 
- var cmpVals [][2]cty.Value - if priorV.IsKnown() && !priorV.IsNull() { - cmpVals = setElementCompareValues(&blockType.Block, priorV, false) - } - configVLen := 0 - if configV.IsKnown() && !configV.IsNull() { - configVLen = configV.LengthInt() - } - if configVLen > 0 { - used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value - newVals := make([]cty.Value, 0, configVLen) - for it := configV.ElementIterator(); it.Next(); { - _, configEV := it.Element() - var priorEV cty.Value - for i, cmp := range cmpVals { - if used[i] { - continue - } - if cmp[1].RawEquals(configEV) { - priorEV = cmp[0] - used[i] = true // we can't use this value on a future iteration - break - } - } - if priorEV == cty.NilVal { - priorEV = cty.NullVal(blockType.ImpliedType()) - } - - newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) - newVals = append(newVals, newEV) - } - newV = cty.SetVal(newVals) - } else { - newV = cty.SetValEmpty(blockType.Block.ImpliedType()) - } - - default: - // Should never happen, since the above cases are comprehensive. - panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) - } - - newAttrs[name] = newV - } - - return cty.ObjectVal(newAttrs) -} - -// setElementCompareValues takes a known, non-null value of a cty.Set type and -// returns a table -- constructed of two-element arrays -- that maps original -// set element values to corresponding values that have all of the computed -// values removed, making them suitable for comparison with values obtained -// from configuration. The element type of the set must conform to the implied -// type of the given schema, or this function will panic. -// -// In the resulting slice, the zeroth element of each array is the original -// value and the one-indexed element is the corresponding "compare value". -// -// This is intended to help correlate prior elements with configured elements -// in ProposedNewObject. The result is a heuristic rather than an exact science, -// since e.g. two separate elements may reduce to the same value through this -// process. The caller must therefore be ready to deal with duplicates. -func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value { - ret := make([][2]cty.Value, 0, set.LengthInt()) - for it := set.ElementIterator(); it.Next(); { - _, ev := it.Element() - ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)}) - } - return ret -} - -// setElementCompareValue creates a new value that has all of the same -// non-computed attribute values as the one given but has all computed -// attribute values forced to null. -// -// If isConfig is true then non-null Optional+Computed attribute values will -// be preserved. Otherwise, they will also be set to null. -// -// The input value must conform to the schema's implied type, and the return -// value is guaranteed to conform to it. 
-func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value { - if v.IsNull() || !v.IsKnown() { - return v - } - - attrs := map[string]cty.Value{} - for name, attr := range schema.Attributes { - switch { - case attr.Computed && attr.Optional: - if isConfig { - attrs[name] = v.GetAttr(name) - } else { - attrs[name] = cty.NullVal(attr.Type) - } - case attr.Computed: - attrs[name] = cty.NullVal(attr.Type) - default: - attrs[name] = v.GetAttr(name) - } - } - - for name, blockType := range schema.BlockTypes { - switch blockType.Nesting { - - case configschema.NestingSingle, configschema.NestingGroup: - attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig) - - case configschema.NestingList, configschema.NestingSet: - cv := v.GetAttr(name) - if cv.IsNull() || !cv.IsKnown() { - attrs[name] = cv - continue - } - if l := cv.LengthInt(); l > 0 { - elems := make([]cty.Value, 0, l) - for it := cv.ElementIterator(); it.Next(); { - _, ev := it.Element() - elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig)) - } - if blockType.Nesting == configschema.NestingSet { - // SetValEmpty would panic if given elements that are not - // all of the same type, but that's guaranteed not to - // happen here because our input value was _already_ a - // set and we've not changed the types of any elements here. - attrs[name] = cty.SetVal(elems) - } else { - attrs[name] = cty.TupleVal(elems) - } - } else { - if blockType.Nesting == configschema.NestingSet { - attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType()) - } else { - attrs[name] = cty.EmptyTupleVal - } - } - - case configschema.NestingMap: - cv := v.GetAttr(name) - if cv.IsNull() || !cv.IsKnown() { - attrs[name] = cv - continue - } - elems := make(map[string]cty.Value) - for it := cv.ElementIterator(); it.Next(); { - kv, ev := it.Element() - elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig) - } - attrs[name] = cty.ObjectVal(elems) - - default: - // Should never happen, since the above cases are comprehensive. - panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) - } - } - - return cty.ObjectVal(attrs) -} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go deleted file mode 100644 index 69acb897..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go +++ /dev/null @@ -1,267 +0,0 @@ -package objchange - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// AssertPlanValid checks checks whether a planned new state returned by a -// provider's PlanResourceChange method is suitable to achieve a change -// from priorState to config. It returns a slice with nonzero length if -// any problems are detected. Because problems here indicate bugs in the -// provider that generated the plannedState, they are written with provider -// developers as an audience, rather than end-users. -// -// All of the given values must have the same type and must conform to the -// implied type of the given schema, or this function may panic or produce -// garbage results. -// -// During planning, a provider may only make changes to attributes that are -// null (unset) in the configuration and are marked as "computed" in the -// resource type schema, in order to insert any default values the provider -// may know about. 
If the default value cannot be determined until apply time,
-// the provider can return an unknown value. Providers are forbidden from
-// planning a change that disagrees with any non-null argument in the
-// configuration.
-//
-// As a special exception, providers _are_ allowed to provide attribute values
-// conflicting with configuration if and only if the planned value exactly
-// matches the corresponding attribute value in the prior state. The provider
-// can use this to signal that the new value is functionally equivalent to
-// the old and thus no change is required.
-func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
- return assertPlanValid(schema, priorState, config, plannedState, nil)
-}
-
-func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
- var errs []error
- if plannedState.IsNull() && !config.IsNull() {
- errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
- return errs
- }
- if config.IsNull() && !plannedState.IsNull() {
- errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
- return errs
- }
- if plannedState.IsNull() {
- // No further checks possible if the planned value is null
- return errs
- }
-
- impTy := schema.ImpliedType()
-
- for name, attrS := range schema.Attributes {
- plannedV := plannedState.GetAttr(name)
- configV := config.GetAttr(name)
- priorV := cty.NullVal(attrS.Type)
- if !priorState.IsNull() {
- priorV = priorState.GetAttr(name)
- }
-
- path := append(path, cty.GetAttrStep{Name: name})
- moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
- errs = append(errs, moreErrs...)
- }
- for name, blockS := range schema.BlockTypes {
- path := append(path, cty.GetAttrStep{Name: name})
- plannedV := plannedState.GetAttr(name)
- configV := config.GetAttr(name)
- priorV := cty.NullVal(impTy.AttributeType(name))
- if !priorState.IsNull() {
- priorV = priorState.GetAttr(name)
- }
- if plannedV.RawEquals(configV) {
- // Easy path: nothing has changed at all
- continue
- }
- if !plannedV.IsKnown() {
- errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
- continue
- }
-
- switch blockS.Nesting {
- case configschema.NestingSingle, configschema.NestingGroup:
- moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
- errs = append(errs, moreErrs...)
- case configschema.NestingList:
- // A NestingList might either be a list or a tuple, depending on
- // whether there are dynamically-typed attributes inside. However,
- // both support a similar-enough API that we can treat them the
- // same for our purposes here.
- if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - if !configV.HasIndex(idx).True() { - continue // should never happen since we checked the lengths above - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - case configschema.NestingMap: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // A NestingMap might either be a map or an object, depending on - // whether there are dynamically-typed attributes inside, but - // that's decided statically and so all values will have the same - // kind. - if plannedV.Type().IsObjectType() { - plannedAtys := plannedV.Type().AttributeTypes() - configAtys := configV.Type().AttributeTypes() - for k := range plannedAtys { - if _, ok := configAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - path := append(path, cty.GetAttrStep{Name: k}) - - plannedEV := plannedV.GetAttr(k) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - configEV := configV.GetAttr(k) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.Type().HasAttribute(k) { - priorEV = priorV.GetAttr(k) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) 
- } - for k := range configAtys { - if _, ok := plannedAtys[k]; !ok { - errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k)) - continue - } - } - } else { - plannedL := plannedV.LengthInt() - configL := configV.LengthInt() - if plannedL != configL { - errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) - continue - } - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - k := idx.AsString() - if !configV.HasIndex(idx).True() { - errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) - continue - } - configEV := configV.Index(idx) - priorEV := cty.NullVal(blockS.ImpliedType()) - if !priorV.IsNull() && priorV.HasIndex(idx).True() { - priorEV = priorV.Index(idx) - } - moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) - errs = append(errs, moreErrs...) - } - for it := configV.ElementIterator(); it.Next(); { - idx, _ := it.Element() - if !plannedV.HasIndex(idx).True() { - errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString())) - continue - } - } - } - case configschema.NestingSet: - if plannedV.IsNull() { - errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null")) - continue - } - - // Because set elements have no identifier with which to correlate - // them, we can't robustly validate the plan for a nested block - // backed by a set, and so unfortunately we need to just trust the - // provider to do the right thing. :( - // - // (In principle we could correlate elements by matching the - // subset of attributes explicitly set in config, except for the - // special diff suppression rule which allows for there to be a - // planned value that is constructed by mixing part of a prior - // value with part of a config value, creating an entirely new - // element that is not present in either prior nor config.) - for it := plannedV.ElementIterator(); it.Next(); { - idx, plannedEV := it.Element() - path := append(path, cty.IndexStep{Key: idx}) - if !plannedEV.IsKnown() { - errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) - continue - } - } - - default: - panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) - } - } - - return errs -} - -func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error { - var errs []error - if plannedV.RawEquals(configV) { - // This is the easy path: provider didn't change anything at all. - return errs - } - if plannedV.RawEquals(priorV) && !priorV.IsNull() { - // Also pretty easy: there is a prior value and the provider has - // returned it unchanged. This indicates that configV and plannedV - // are functionally equivalent and so the provider wishes to disregard - // the configuration value in favor of the prior. - return errs - } - if attrS.Computed && configV.IsNull() { - // The provider is allowed to change the value of any computed - // attribute that isn't explicitly set in the config. 
- return errs - } - - // If none of the above conditions match, the provider has made an invalid - // change to this attribute. - if priorV.IsNull() { - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) - } - return errs - } - if attrS.Sensitive { - errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) - } else { - errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) - } - return errs -} diff --git a/vendor/github.com/hashicorp/terraform/plans/plan.go b/vendor/github.com/hashicorp/terraform/plans/plan.go deleted file mode 100644 index 5a3e4548..00000000 --- a/vendor/github.com/hashicorp/terraform/plans/plan.go +++ /dev/null @@ -1,92 +0,0 @@ -package plans - -import ( - "sort" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// Plan is the top-level type representing a planned set of changes. -// -// A plan is a summary of the set of changes required to move from a current -// state to a goal state derived from configuration. The described changes -// are not applied directly, but contain an approximation of the final -// result that will be completed during apply by resolving any values that -// cannot be predicted. -// -// A plan must always be accompanied by the state and configuration it was -// built from, since the plan does not itself include all of the information -// required to make the changes indicated. -type Plan struct { - VariableValues map[string]DynamicValue - Changes *Changes - TargetAddrs []addrs.Targetable - ProviderSHA256s map[string][]byte - Backend Backend -} - -// Backend represents the backend-related configuration and other data as it -// existed when a plan was created. -type Backend struct { - // Type is the type of backend that the plan will apply against. - Type string - - // Config is the configuration of the backend, whose schema is decided by - // the backend Type. - Config DynamicValue - - // Workspace is the name of the workspace that was active when the plan - // was created. It is illegal to apply a plan created for one workspace - // to the state of another workspace. - // (This constraint is already enforced by the statefile lineage mechanism, - // but storing this explicitly allows us to return a better error message - // in the situation where the user has the wrong workspace selected.) - Workspace string -} - -func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { - dv, err := NewDynamicValue(config, configSchema.ImpliedType()) - if err != nil { - return nil, err - } - - return &Backend{ - Type: typeName, - Config: dv, - Workspace: workspaceName, - }, nil -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving plan. -// -// The result is de-duplicated so that each distinct address appears only once. 
-func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig { - if p == nil || p.Changes == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, rc := range p.Changes.Resources { - m[rc.ProviderAddr.String()] = rc.ProviderAddr - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go deleted file mode 100644 index f20f913f..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/client.go +++ /dev/null @@ -1,40 +0,0 @@ -package plugin - -import ( - "os" - "os/exec" - - hclog "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/plugin/discovery" -) - -// The TF_DISABLE_PLUGIN_TLS environment variable is intended only for use by -// the plugin SDK test framework. We do not recommend Terraform CLI end-users -// set this variable. -var enableAutoMTLS = os.Getenv("TF_DISABLE_PLUGIN_TLS") == "" - -// ClientConfig returns a configuration object that can be used to instantiate -// a client for the plugin described by the given metadata. -func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { - logger := hclog.New(&hclog.LoggerOptions{ - Name: "plugin", - Level: hclog.Trace, - Output: os.Stderr, - }) - - return &plugin.ClientConfig{ - Cmd: exec.Command(m.Path), - HandshakeConfig: Handshake, - VersionedPlugins: VersionedPlugins, - Managed: true, - Logger: logger, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - AutoMTLS: enableAutoMTLS, - } -} - -// Client returns a plugin client for the plugin described by the given metadata. -func Client(m discovery.PluginMeta) *plugin.Client { - return plugin.NewClient(ClientConfig(m)) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go deleted file mode 100644 index 51cb2fe2..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go +++ /dev/null @@ -1,132 +0,0 @@ -package convert - -import ( - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// WarnsAndErrorsToProto converts the warnings and errors return by the legacy -// provider to protobuf diagnostics. -func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { - for _, w := range warns { - diags = AppendProtoDiag(diags, w) - } - - for _, e := range errs { - diags = AppendProtoDiag(diags, e) - } - - return diags -} - -// AppendProtoDiag appends a new diagnostic from a warning string or an error. -// This panics if d is not a string or error. 
-func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { - switch d := d.(type) { - case cty.PathError: - ap := PathToAttributePath(d.Path) - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - Attribute: ap, - }) - case error: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: d.Error(), - }) - case string: - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_WARNING, - Summary: d, - }) - case *proto.Diagnostic: - diags = append(diags, d) - case []*proto.Diagnostic: - diags = append(diags, d...) - } - return diags -} - -// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. -func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, d := range ds { - var severity tfdiags.Severity - - switch d.Severity { - case proto.Diagnostic_ERROR: - severity = tfdiags.Error - case proto.Diagnostic_WARNING: - severity = tfdiags.Warning - } - - var newDiag tfdiags.Diagnostic - - // if there's an attribute path, we need to create a AttributeValue diagnostic - if d.Attribute != nil { - path := AttributePathToPath(d.Attribute) - newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) - } else { - newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) - } - - diags = diags.Append(newDiag) - } - - return diags -} - -// AttributePathToPath takes the proto encoded path and converts it to a cty.Path -func AttributePathToPath(ap *proto.AttributePath) cty.Path { - var p cty.Path - for _, step := range ap.Steps { - switch selector := step.Selector.(type) { - case *proto.AttributePath_Step_AttributeName: - p = p.GetAttr(selector.AttributeName) - case *proto.AttributePath_Step_ElementKeyString: - p = p.Index(cty.StringVal(selector.ElementKeyString)) - case *proto.AttributePath_Step_ElementKeyInt: - p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) - } - } - return p -} - -// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. -func PathToAttributePath(p cty.Path) *proto.AttributePath { - ap := &proto.AttributePath{} - for _, step := range p { - switch selector := step.(type) { - case cty.GetAttrStep: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: selector.Name, - }, - }) - case cty.IndexStep: - key := selector.Key - switch key.Type() { - case cty.String: - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: key.AsString(), - }, - }) - case cty.Number: - v, _ := key.AsBigFloat().Int64() - ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: v, - }, - }) - default: - // We'll bail early if we encounter anything else, and just - // return the valid prefix. 
- return ap - } - } - } - return ap -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go deleted file mode 100644 index 6a45f54c..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go +++ /dev/null @@ -1,154 +0,0 @@ -package convert - -import ( - "encoding/json" - "reflect" - "sort" - - "github.com/hashicorp/terraform/configs/configschema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/providers" -) - -// ConfigSchemaToProto takes a *configschema.Block and converts it to a -// proto.Schema_Block for a grpc response. -func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { - block := &proto.Schema_Block{} - - for _, name := range sortedKeys(b.Attributes) { - a := b.Attributes[name] - attr := &proto.Schema_Attribute{ - Name: name, - Description: a.Description, - Optional: a.Optional, - Computed: a.Computed, - Required: a.Required, - Sensitive: a.Sensitive, - } - - ty, err := json.Marshal(a.Type) - if err != nil { - panic(err) - } - - attr.Type = ty - - block.Attributes = append(block.Attributes, attr) - } - - for _, name := range sortedKeys(b.BlockTypes) { - b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) - } - - return block -} - -func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { - var nesting proto.Schema_NestedBlock_NestingMode - switch b.Nesting { - case configschema.NestingSingle: - nesting = proto.Schema_NestedBlock_SINGLE - case configschema.NestingGroup: - nesting = proto.Schema_NestedBlock_GROUP - case configschema.NestingList: - nesting = proto.Schema_NestedBlock_LIST - case configschema.NestingSet: - nesting = proto.Schema_NestedBlock_SET - case configschema.NestingMap: - nesting = proto.Schema_NestedBlock_MAP - default: - nesting = proto.Schema_NestedBlock_INVALID - } - return &proto.Schema_NestedBlock{ - TypeName: name, - Block: ConfigSchemaToProto(&b.Block), - Nesting: nesting, - MinItems: int64(b.MinItems), - MaxItems: int64(b.MaxItems), - } -} - -// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. -func ProtoToProviderSchema(s *proto.Schema) providers.Schema { - return providers.Schema{ - Version: s.Version, - Block: ProtoToConfigSchema(s.Block), - } -} - -// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it -// to a terraform *configschema.Block. 
-func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
- block := &configschema.Block{
- Attributes: make(map[string]*configschema.Attribute),
- BlockTypes: make(map[string]*configschema.NestedBlock),
- }
-
- for _, a := range b.Attributes {
- attr := &configschema.Attribute{
- Description: a.Description,
- Required: a.Required,
- Optional: a.Optional,
- Computed: a.Computed,
- Sensitive: a.Sensitive,
- }
-
- if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
- panic(err)
- }
-
- block.Attributes[a.Name] = attr
- }
-
- for _, b := range b.BlockTypes {
- block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
- }
-
- return block
-}
-
-func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
- var nesting configschema.NestingMode
- switch b.Nesting {
- case proto.Schema_NestedBlock_SINGLE:
- nesting = configschema.NestingSingle
- case proto.Schema_NestedBlock_GROUP:
- nesting = configschema.NestingGroup
- case proto.Schema_NestedBlock_LIST:
- nesting = configschema.NestingList
- case proto.Schema_NestedBlock_MAP:
- nesting = configschema.NestingMap
- case proto.Schema_NestedBlock_SET:
- nesting = configschema.NestingSet
- default:
- // In all other cases we'll leave it as the zero value (invalid) and
- // let the caller validate it and deal with this.
- }
-
- nb := &configschema.NestedBlock{
- Nesting: nesting,
- MinItems: int(b.MinItems),
- MaxItems: int(b.MaxItems),
- }
-
- nested := ProtoToConfigSchema(b.Block)
- nb.Block = *nested
- return nb
-}
-
-// sortedKeys returns the lexically sorted keys from the given map. This is
-// used to make schema conversions deterministic. This panics if map keys
-// are not strings.
-func sortedKeys(m interface{}) []string {
- v := reflect.ValueOf(m)
- keys := make([]string, v.Len())
-
- mapKeys := v.MapKeys()
- for i, k := range mapKeys {
- keys[i] = k.Interface().(string)
- }
-
- sort.Strings(keys)
- return keys
-}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
deleted file mode 100644
index 729e9709..00000000
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package discovery
-
-// Error is a type used to describe situations that the caller must handle
-// since they indicate some form of user error.
-//
-// The functions and methods that return these specialized errors indicate so
-// in their documentation. The Error type should not itself be used directly,
-// but rather errors should be compared using the == operator with the
-// error constants in this package.
-//
-// Values of this type are _not_ used when the error being reported is an
-// operational error (server unavailable, etc) or indicative of a bug in
-// this package or its caller.
-type Error string
-
-// ErrorNoSuitableVersion indicates that a suitable version (meeting given
-// constraints) is not available.
-const ErrorNoSuitableVersion = Error("no suitable version is available")
-
-// ErrorNoVersionCompatible indicates that all of the available versions
-// that otherwise met constraints are not compatible with the current
-// version of Terraform.
-const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
-
-// ErrorVersionIncompatible indicates that all of the versions within the
-// constraints are not compatible with the current version of Terraform, though
-// there does exist a version outside of the constraints that is compatible.
-const ErrorVersionIncompatible = Error("incompatible provider version") - -// ErrorNoSuchProvider indicates that no provider exists with a name given -const ErrorNoSuchProvider = Error("no provider exists with the given name") - -// ErrorNoVersionCompatibleWithPlatform indicates that all of the available -// versions that otherwise met constraints are not compatible with the -// requested platform -const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform") - -// ErrorMissingChecksumVerification indicates that either the provider -// distribution is missing the SHA256SUMS file or the checksum file does -// not contain a checksum for the binary plugin -const ErrorMissingChecksumVerification = Error("unable to verify checksum") - -// ErrorChecksumVerification indicates that the current checksum of the -// provider plugin has changed since the initial release and is not trusted -// to download -const ErrorChecksumVerification = Error("unexpected plugin checksum") - -// ErrorSignatureVerification indicates that the digital signature for a -// provider distribution could not be verified for one of the following -// reasons: missing signature file, missing public key, or the signature -// was not signed by any known key for the publisher -const ErrorSignatureVerification = Error("unable to verify signature") - -// ErrorServiceUnreachable indicates that the network was unable to connect -// to the registry service -const ErrorServiceUnreachable = Error("registry service is unreachable") - -// ErrorPublicRegistryUnreachable indicates that the network was unable to connect -// to the public registry in particular, so we can show a link to the statuspage -const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates") - -func (err Error) Error() string { - return string(err) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go deleted file mode 100644 index f053312b..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go +++ /dev/null @@ -1,191 +0,0 @@ -package discovery - -import ( - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" -) - -// FindPlugins looks in the given directories for files whose filenames -// suggest that they are plugins of the given kind (e.g. "provider") and -// returns a PluginMetaSet representing the discovered potential-plugins. -// -// Currently this supports two different naming schemes. The current -// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing -// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is -// files directly in the given directory whose names are like -// terraform-$KIND-$NAME. -// -// Only one plugin will be returned for each unique plugin (name, version) -// pair, with preference given to files found in earlier directories. -// -// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths. -func FindPlugins(kind string, dirs []string) PluginMetaSet { - return ResolvePluginPaths(FindPluginPaths(kind, dirs)) -} - -// FindPluginPaths looks in the given directories for files whose filenames -// suggest that they are plugins of the given kind (e.g. "provider"). -// -// The return value is a list of absolute paths that appear to refer to -// plugins in the given directories, based only on what can be inferred -// from the naming scheme. 
The paths returned are ordered such that files -// in later dirs appear after files in earlier dirs in the given directory -// list. Within the same directory plugins are returned in a consistent but -// undefined order. -func FindPluginPaths(kind string, dirs []string) []string { - // This is just a thin wrapper around findPluginPaths so that we can - // use the latter in tests with a fake machineName so we can use our - // test fixtures. - return findPluginPaths(kind, dirs) -} - -func findPluginPaths(kind string, dirs []string) []string { - prefix := "terraform-" + kind + "-" - - ret := make([]string, 0, len(dirs)) - - for _, dir := range dirs { - items, err := ioutil.ReadDir(dir) - if err != nil { - // Ignore missing dirs, non-dirs, etc - continue - } - - log.Printf("[DEBUG] checking for %s in %q", kind, dir) - - for _, item := range items { - fullName := item.Name() - - if !strings.HasPrefix(fullName, prefix) { - continue - } - - // New-style paths must have a version segment in filename - if strings.Contains(strings.ToLower(fullName), "_v") { - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[DEBUG] found %s %q", kind, fullName) - ret = append(ret, filepath.Clean(absPath)) - continue - } - - // Legacy style with files directly in the base directory - absPath, err := filepath.Abs(filepath.Join(dir, fullName)) - if err != nil { - log.Printf("[ERROR] plugin filepath error: %s", err) - continue - } - - // Check that the file we found is usable - if !pathIsFile(absPath) { - log.Printf("[ERROR] ignoring non-file %s", absPath) - continue - } - - log.Printf("[WARN] found legacy %s %q", kind, fullName) - - ret = append(ret, filepath.Clean(absPath)) - } - } - - return ret -} - -// Returns true if and only if the given path refers to a file or a symlink -// to a file. -func pathIsFile(path string) bool { - info, err := os.Stat(path) - if err != nil { - return false - } - - return !info.IsDir() -} - -// ResolvePluginPaths takes a list of paths to plugin executables (as returned -// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the -// referenced plugins. -// -// If the same combination of plugin name and version appears multiple times, -// the earlier reference will be preferred. Several different versions of -// the same plugin name may be returned, in which case the methods of -// PluginMetaSet can be used to filter down. -func ResolvePluginPaths(paths []string) PluginMetaSet { - s := make(PluginMetaSet) - - type nameVersion struct { - Name string - Version string - } - found := make(map[nameVersion]struct{}) - - for _, path := range paths { - baseName := strings.ToLower(filepath.Base(path)) - if !strings.HasPrefix(baseName, "terraform-") { - // Should never happen with reasonable input - continue - } - - baseName = baseName[10:] - firstDash := strings.Index(baseName, "-") - if firstDash == -1 { - // Should never happen with reasonable input - continue - } - - baseName = baseName[firstDash+1:] - if baseName == "" { - // Should never happen with reasonable input - continue - } - - // Trim the .exe suffix used on Windows before we start wrangling - // the remainder of the path. 
- if strings.HasSuffix(baseName, ".exe") { - baseName = baseName[:len(baseName)-4] - } - - parts := strings.SplitN(baseName, "_v", 2) - name := parts[0] - version := VersionZero - if len(parts) == 2 { - version = parts[1] - } - - // Auto-installed plugins contain an extra name portion representing - // the expected plugin version, which we must trim off. - if underX := strings.Index(version, "_x"); underX != -1 { - version = version[:underX] - } - - if _, ok := found[nameVersion{name, version}]; ok { - // Skip duplicate versions of the same plugin - // (We do this during this step because after this we will be - // dealing with sets and thus lose our ordering with which to - // decide preference.) - continue - } - - s.Add(PluginMeta{ - Name: name, - Version: VersionStr(version), - Path: path, - }) - found[nameVersion{name, version}] = struct{}{} - } - - return s -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go deleted file mode 100644 index 7c63d4c8..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go +++ /dev/null @@ -1,693 +0,0 @@ -package discovery - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - - "github.com/hashicorp/errwrap" - getter "github.com/hashicorp/go-getter" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/httpclient" - "github.com/hashicorp/terraform/registry" - "github.com/hashicorp/terraform/registry/regsrc" - "github.com/hashicorp/terraform/registry/response" - "github.com/hashicorp/terraform/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/cli" -) - -// Releases are located by querying the terraform registry. - -const protocolVersionHeader = "x-terraform-protocol-version" - -var httpClient *http.Client - -var errVersionNotFound = errors.New("version not found") - -func init() { - httpClient = httpclient.New() - - httpGetter := &getter.HttpGetter{ - Client: httpClient, - Netrc: true, - } - - getter.Getters["http"] = httpGetter - getter.Getters["https"] = httpGetter -} - -// An Installer maintains a local cache of plugins by downloading plugins -// from an online repository. -type Installer interface { - Get(provider addrs.Provider, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) - PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error) -} - -// ProviderInstaller is an Installer implementation that knows how to -// download Terraform providers from the official HashiCorp releases service -// into a local directory. The files downloaded are compliant with the -// naming scheme expected by FindPlugins, so the target directory of a -// provider installer can be used as one of several plugin discovery sources. -type ProviderInstaller struct { - Dir string - - // Cache is used to access and update a local cache of plugins if non-nil. - // Can be nil to disable caching. - Cache PluginCache - - PluginProtocolVersion uint - - // OS and Arch specify the OS and architecture that should be used when - // installing plugins. These use the same labels as the runtime.GOOS and - // runtime.GOARCH variables respectively, and indeed the values of these - // are used as defaults if either of these is the empty string. 
- OS string - Arch string - - // Skip checksum and signature verification - SkipVerify bool - - Ui cli.Ui // Ui for output - - // Services is a required *disco.Disco, which may have services and - // credentials pre-loaded. - Services *disco.Disco - - // registry client - registry *registry.Client -} - -// Get is part of an implementation of type Installer, and attempts to download -// and install a Terraform provider matching the given constraints. -// -// This method may return one of a number of sentinel errors from this -// package to indicate issues that are likely to be resolvable via user action: -// -// ErrorNoSuchProvider: no provider with the given name exists in the repository. -// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. -// ErrorNoVersionCompatible: a plugin was found within the constraints but it is -// incompatible with the current Terraform version. -// -// These errors should be recognized and handled as special cases by the caller -// to present a suitable user-oriented error message. -// -// All other errors indicate an internal problem that is likely _not_ solvable -// through user action, or at least not within Terraform's scope. Error messages -// are produced under the assumption that if presented to the user they will -// be presented alongside context about what is being installed, and thus the -// error messages do not redundantly include such information. -func (i *ProviderInstaller) Get(provider addrs.Provider, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) { - var diags tfdiags.Diagnostics - - // a little bit of initialization. - if i.OS == "" { - i.OS = runtime.GOOS - } - if i.Arch == "" { - i.Arch = runtime.GOARCH - } - if i.registry == nil { - i.registry = registry.NewClient(i.Services, nil) - } - - // get a full listing of versions for the requested provider - allVersions, err := i.listProviderVersions(provider) - - // TODO: return multiple errors - if err != nil { - log.Printf("[DEBUG] %s", err) - if registry.IsServiceUnreachable(err) { - registryHost, err := i.hostname() - if err == nil && registryHost == regsrc.PublicRegistryHost.Raw { - return PluginMeta{}, diags, ErrorPublicRegistryUnreachable - } - return PluginMeta{}, diags, ErrorServiceUnreachable - } - if registry.IsServiceNotProvided(err) { - return PluginMeta{}, diags, err - } - return PluginMeta{}, diags, ErrorNoSuchProvider - } - - // Add any warnings from the response to diags - for _, warning := range allVersions.Warnings { - hostname, err := i.hostname() - if err != nil { - return PluginMeta{}, diags, err - } - diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning)) - diags = diags.Append(diag) - } - - if len(allVersions.Versions) == 0 { - return PluginMeta{}, diags, ErrorNoSuitableVersion - } - providerSource := allVersions.ID - - // Filter the list of plugin versions to those which meet the version constraints - versions := allowedVersions(allVersions, req) - if len(versions) == 0 { - return PluginMeta{}, diags, ErrorNoSuitableVersion - } - - // sort them newest to oldest. The newest version wins! 
- response.ProviderVersionCollection(versions).Sort() - - // if the chosen provider version does not support the requested platform, - // filter the list of acceptable versions to those that support that platform - if err := i.checkPlatformCompatibility(versions[0]); err != nil { - versions = i.platformCompatibleVersions(versions) - if len(versions) == 0 { - return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform - } - } - - // we now have a winning platform-compatible version - versionMeta := versions[0] - v := VersionStr(versionMeta.Version).MustParse() - - // check protocol compatibility - if err := i.checkPluginProtocol(versionMeta); err != nil { - closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions) - if err != nil { - // No operation here if we can't find a version with compatible protocol - return PluginMeta{}, diags, err - } - - // Prompt version suggestion to UI based on closest protocol match - var errMsg string - closestVersion := VersionStr(closestMatch.Version).MustParse() - if v.NewerThan(closestVersion) { - errMsg = providerProtocolTooNew - } else { - errMsg = providerProtocolTooOld - } - - constraintStr := req.String() - if constraintStr == "" { - constraintStr = "(any version)" - } - - return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf( - errMsg, provider.LegacyString(), v.String(), tfversion.String(), - closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr))) - } - - downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version) - if err != nil { - return PluginMeta{}, diags, err - } - providerURL := downloadURLs.DownloadURL - - if !i.SkipVerify { - // Terraform verifies the integrity of a provider release before downloading - // the plugin binary. The digital signature (SHA256SUMS.sig) on the - // release distribution (SHA256SUMS) is verified with the public key of the - // publisher provided in the Terraform Registry response, ensuring that - // everything is as intended by the publisher. The checksum of the provider - // plugin is expected in the SHA256SUMS file and is double checked to match - // the checksum of the original published release to the Registry. This - // enforces immutability of releases between the Registry and the plugin's - // host location. Lastly, the integrity of the binary is verified upon - // download matches the Registry and signed checksum. - sha256, err := i.getProviderChecksum(downloadURLs) - if err != nil { - return PluginMeta{}, diags, err - } - - // add the checksum parameter for go-getter to verify the download for us. - if sha256 != "" { - providerURL = providerURL + "?checksum=sha256:" + sha256 - } - } - - printedProviderName := fmt.Sprintf("%q (%s)", provider.LegacyString(), providerSource) - i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version)) - log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version) - err = i.install(provider, v, providerURL) - if err != nil { - return PluginMeta{}, diags, err - } - - // Find what we just installed - // (This is weird, because go-getter doesn't directly return - // information about what was extracted, and we just extracted - // the archive directly into a shared dir here.) 
- log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider.LegacyString(), versionMeta.Version) - metas := FindPlugins("provider", []string{i.Dir}) - log.Printf("[DEBUG] all plugins found %#v", metas) - metas, _ = metas.ValidateVersions() - metas = metas.WithName(provider.Type).WithVersion(v) - log.Printf("[DEBUG] filtered plugins %#v", metas) - if metas.Count() == 0 { - // This should never happen. Suggests that the release archive - // contains an executable file whose name doesn't match the - // expected convention. - return PluginMeta{}, diags, fmt.Errorf( - "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", - versionMeta.Version, - ) - } - - if metas.Count() > 1 { - // This should also never happen, and suggests that a - // particular version was re-released with a different - // executable filename. We consider releases as immutable, so - // this is an error. - return PluginMeta{}, diags, fmt.Errorf( - "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", - versionMeta.Version, - ) - } - - // By now we know we have exactly one meta, and so "Newest" will - // return that one. - return metas.Newest(), diags, nil -} - -func (i *ProviderInstaller) install(provider addrs.Provider, version Version, url string) error { - if i.Cache != nil { - log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider.LegacyString(), version) - cached := i.Cache.CachedPluginPath("provider", provider.Type, version) - if cached == "" { - log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider.LegacyString(), version, url) - err := getter.Get(i.Cache.InstallDir(), url) - if err != nil { - return err - } - // should now be in cache - cached = i.Cache.CachedPluginPath("provider", provider.Type, version) - if cached == "" { - // should never happen if the getter is behaving properly - // and the plugins are packaged properly. - return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) - } - } - - // Link or copy the cached binary into our install dir so the - // normal resolution machinery can find it. - filename := filepath.Base(cached) - targetPath := filepath.Join(i.Dir, filename) - // check if the target dir exists, and create it if not - var err error - if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { - err = os.MkdirAll(i.Dir, 0700) - } - if err != nil { - return err - } - - log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider.LegacyString(), version, targetPath, cached) - - // Delete if we can. If there's nothing there already then no harm done. - // This is important because we can't create a link if there's - // already a file of the same name present. - // (any other error here we'll catch below when we try to write here) - os.Remove(targetPath) - - // We don't attempt linking on Windows because links are not - // comprehensively supported by all tools/apps in Windows and - // so we choose to be conservative to avoid creating any - // weird issues for Windows users. - linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned - if runtime.GOOS != "windows" { - // Try hard linking first. Hard links are preferable because this - // creates a self-contained directory that doesn't depend on the - // cache after install. - linkErr = os.Link(cached, targetPath) - - // If that failed, try a symlink. 
This _does_ depend on the cache - // after install, so the user must manage the cache more carefully - // in this case, but avoids creating redundant copies of the - // plugins on disk. - if linkErr != nil { - linkErr = os.Symlink(cached, targetPath) - } - } - - // If we still have an error then we'll try a copy as a fallback. - // In this case either the OS is Windows or the target filesystem - // can't support symlinks. - if linkErr != nil { - srcFile, err := os.Open(cached) - if err != nil { - return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) - } - defer srcFile.Close() - - destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create %s: %s", targetPath, err) - } - - _, err = io.Copy(destFile, srcFile) - if err != nil { - destFile.Close() - return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) - } - - err = destFile.Close() - if err != nil { - return fmt.Errorf("error creating %s: %s", targetPath, err) - } - } - - // One way or another, by the time we get here we should have either - // a link or a copy of the cached plugin within i.Dir, as expected. - } else { - log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider.LegacyString(), version, url) - err := getter.Get(i.Dir, url) - if err != nil { - return err - } - } - return nil -} - -func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { - purge := make(PluginMetaSet) - - present := FindPlugins("provider", []string{i.Dir}) - for meta := range present { - chosen, ok := used[meta.Name] - if !ok { - purge.Add(meta) - } - if chosen.Path != meta.Path { - purge.Add(meta) - } - } - - removed := make(PluginMetaSet) - var errs error - for meta := range purge { - path := meta.Path - err := os.Remove(path) - if err != nil { - errs = multierror.Append(errs, fmt.Errorf( - "failed to remove unused provider plugin %s: %s", - path, err, - )) - } else { - removed.Add(meta) - } - } - - return removed, errs -} - -func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { - // Get SHA256SUMS file. - shasums, err := getFile(resp.ShasumsURL) - if err != nil { - log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err) - return "", ErrorMissingChecksumVerification - } - - // Get SHA256SUMS.sig file. - signature, err := getFile(resp.ShasumsSignatureURL) - if err != nil { - log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err) - return "", ErrorSignatureVerification - } - - // Verify the GPG signature returned from the Registry. - asciiArmor := resp.SigningKeys.GPGASCIIArmor() - signer, err := verifySig(shasums, signature, asciiArmor) - if err != nil { - log.Printf("[ERROR] error verifying signature: %s", err) - return "", ErrorSignatureVerification - } - - // Also verify the GPG signature against the HashiCorp public key. This is - // a temporary additional check until a more robust key verification - // process is added in a future release. - _, err = verifySig(shasums, signature, HashicorpPublicKey) - if err != nil { - log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err) - return "", ErrorSignatureVerification - } - - // Display identity for GPG key which succeeded verifying the signature. - // This could also be used to display to the user with i.Ui.Info(). 
- identities := []string{} - for k := range signer.Identities { - identities = append(identities, k) - } - identity := strings.Join(identities, ", ") - log.Printf("[DEBUG] verified GPG signature with key from %s", identity) - - // Extract checksum for this os/arch platform binary and verify against Registry - checksum := checksumForFile(shasums, resp.Filename) - if checksum == "" { - log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL) - return "", ErrorMissingChecksumVerification - } else if checksum != resp.Shasum { - log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL) - return "", ErrorChecksumVerification - } - - return checksum, nil -} - -func (i *ProviderInstaller) hostname() (string, error) { - provider := regsrc.NewTerraformProvider("", i.OS, i.Arch) - svchost, err := provider.SvcHost() - if err != nil { - return "", err - } - - return svchost.ForDisplay(), nil -} - -// list all versions available for the named provider -func (i *ProviderInstaller) listProviderVersions(provider addrs.Provider) (*response.TerraformProviderVersions, error) { - req := regsrc.NewTerraformProvider(provider.Type, i.OS, i.Arch) - versions, err := i.registry.TerraformProviderVersions(req) - return versions, err -} - -func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) { - urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version) - if urls == nil { - return nil, fmt.Errorf("No download urls found for provider %s", name) - } - return urls, err -} - -// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. -// Prerelease versions are filtered. 
-func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { - // Loop through all the provider versions to find the earliest and latest - // versions that match the installer protocol to then select the closest of the two - var latest, earliest *response.TerraformProviderVersion - for _, version := range versions { - // Prereleases are filtered and will not be suggested - v, err := VersionStr(version.Version).Parse() - if err != nil || v.IsPrerelease() { - continue - } - - if err := i.checkPluginProtocol(version); err == nil { - if earliest == nil { - // Found the first provider version with compatible protocol - earliest = version - } - // Update the latest protocol compatible version - latest = version - } - } - if earliest == nil { - // No compatible protocol was found for any version - return nil, ErrorNoVersionCompatible - } - - // Convert protocols to comparable types - protoString := strconv.Itoa(int(i.PluginProtocolVersion)) - protocolVersion, err := VersionStr(protoString).Parse() - if err != nil { - return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) - } - - earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse() - if err != nil { - return nil, err - } - - // Compare installer protocol version with the first protocol listed of the earliest match - // [A, B] where A is assumed the earliest compatible major version of the protocol pair - if protocolVersion.NewerThan(earliestVersionProtocol) { - // Provider protocols are too old, the closest version is the earliest compatible version - return earliest, nil - } - - // Provider protocols are too new, the closest version is the latest compatible version - return latest, nil -} - -func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error { - // TODO: should this be a different error? We should probably differentiate between - // no compatible versions and no protocol versions listed at all - if len(versionMeta.Protocols) == 0 { - return fmt.Errorf("no plugin protocol versions listed") - } - - protoString := strconv.Itoa(int(i.PluginProtocolVersion)) - protocolVersion, err := VersionStr(protoString).Parse() - if err != nil { - return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) - } - protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse() - if err != nil { - // This should not fail if the preceding function succeeded. - return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String()) - } - - for _, p := range versionMeta.Protocols { - proPro, err := VersionStr(p).Parse() - if err != nil { - // invalid protocol reported by the registry. Move along. - log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version) - continue - } - // success! - if protocolConstraint.Allows(proPro) { - return nil - } - } - - return ErrorNoVersionCompatible -} - -// REVIEWER QUESTION (again): this ends up swallowing a bunch of errors from -// checkPluginProtocol. Do they need to be percolated up better, or would -// debug messages would suffice in these situations? 
-func (i *ProviderInstaller) findPlatformCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { - for _, version := range versions { - if err := i.checkPlatformCompatibility(version); err == nil { - return version, nil - } - } - - return nil, ErrorNoVersionCompatibleWithPlatform -} - -// platformCompatibleVersions returns a list of provider versions that are -// compatible with the requested platform. -func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { - var v []*response.TerraformProviderVersion - for _, version := range versions { - if err := i.checkPlatformCompatibility(version); err == nil { - v = append(v, version) - } - } - return v -} - -func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { - if len(versionMeta.Platforms) == 0 { - return fmt.Errorf("no supported provider platforms listed") - } - for _, p := range versionMeta.Platforms { - if p.Arch == i.Arch && p.OS == i.OS { - return nil - } - } - return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) -} - -// take the list of available versions for a plugin, and filter out those that -// don't fit the constraints. -func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { - var allowed []*response.TerraformProviderVersion - - for _, v := range available.Versions { - version, err := VersionStr(v.Version).Parse() - if err != nil { - log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) - continue - } - if required.Allows(version) { - allowed = append(allowed, v) - } - } - return allowed -} - -func checksumForFile(sums []byte, name string) string { - for _, line := range strings.Split(string(sums), "\n") { - parts := strings.Fields(line) - if len(parts) > 1 && parts[1] == name { - return parts[0] - } - } - return "" -} - -func getFile(url string) ([]byte, error) { - resp, err := httpClient.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s", resp.Status) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return data, err - } - return data, nil -} - -// providerProtocolTooOld is a message sent to the CLI UI if the provider's -// supported protocol versions are too old for the user's version of terraform, -// but an older version of the provider is compatible. -const providerProtocolTooOld = ` -[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] - -Provider version %s is the earliest compatible version. Select it with -the following version constraint: - - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on -compatibility between provider and Terraform versions. -` - -// providerProtocolTooNew is a message sent to the CLI UI if the provider's -// supported protocol versions are too new for the user's version of terraform, -// and the user could either upgrade terraform or choose an older version of the -// provider -const providerProtocolTooNew = ` -[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] - -Provider version %s is the latest compatible version. 
Select it with -the following constraint: - - version = %q - -Terraform checked all of the plugin versions matching the given constraint: - %s - -Consult the documentation for this provider for more information on -compatibility between provider and Terraform versions. - -Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. -` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go deleted file mode 100644 index 1a100426..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go +++ /dev/null @@ -1,48 +0,0 @@ -package discovery - -// PluginCache is an interface implemented by objects that are able to maintain -// a cache of plugins. -type PluginCache interface { - // CachedPluginPath returns a path where the requested plugin is already - // cached, or an empty string if the requested plugin is not yet cached. - CachedPluginPath(kind string, name string, version Version) string - - // InstallDir returns the directory that new plugins should be installed into - // in order to populate the cache. This directory should be used as the - // first argument to getter.Get when downloading plugins with go-getter. - // - // After installing into this directory, use CachedPluginPath to obtain the - // path where the plugin was installed. - InstallDir() string -} - -// NewLocalPluginCache returns a PluginCache that caches plugins in a -// given local directory. -func NewLocalPluginCache(dir string) PluginCache { - return &pluginCache{ - Dir: dir, - } -} - -type pluginCache struct { - Dir string -} - -func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { - allPlugins := FindPlugins(kind, []string{c.Dir}) - plugins := allPlugins.WithName(name).WithVersion(version) - - if plugins.Count() == 0 { - // nothing cached - return "" - } - - // There should generally be only one plugin here; if there's more than - // one match for some reason then we'll just choose one arbitrarily. 
- plugin := plugins.Newest() - return plugin.Path -} - -func (c *pluginCache) InstallDir() string { - return c.Dir -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go deleted file mode 100644 index 4622ca05..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go +++ /dev/null @@ -1,34 +0,0 @@ -package discovery - -// HashicorpPublicKey is the HashiCorp public key, also available at -// https://www.hashicorp.com/security -const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y -0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go deleted file mode 100644 index bdcebcb9..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go +++ /dev/null @@ -1,41 +0,0 @@ -package discovery - -import ( - "crypto/sha256" - "io" - "os" -) - -// PluginMeta is metadata about a plugin, useful for launching the plugin -// and for understanding which plugins are available. -type PluginMeta struct { - // Name is the name of the plugin, e.g. as inferred from the plugin - // binary's filename, or by explicit configuration. - Name string - - // Version is the semver version of the plugin, expressed as a string - // that might not be semver-valid. - Version VersionStr - - // Path is the absolute path of the executable that can be launched - // to provide the RPC server for this plugin. - Path string -} - -// SHA256 returns a SHA256 hash of the content of the referenced executable -// file, or an error if the file's contents cannot be read. 
-func (m PluginMeta) SHA256() ([]byte, error) { - f, err := os.Open(m.Path) - if err != nil { - return nil, err - } - defer f.Close() - - h := sha256.New() - _, err = io.Copy(h, f) - if err != nil { - return nil, err - } - - return h.Sum(nil), nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go deleted file mode 100644 index 3a992892..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go +++ /dev/null @@ -1,195 +0,0 @@ -package discovery - -// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. -// -// Methods on this type allow filtering of the set to produce subsets that -// meet more restrictive criteria. -type PluginMetaSet map[PluginMeta]struct{} - -// Add inserts the given PluginMeta into the receiving set. This is a no-op -// if the given meta is already present. -func (s PluginMetaSet) Add(p PluginMeta) { - s[p] = struct{}{} -} - -// Remove removes the given PluginMeta from the receiving set. This is a no-op -// if the given meta is not already present. -func (s PluginMetaSet) Remove(p PluginMeta) { - delete(s, p) -} - -// Has returns true if the given meta is in the receiving set, or false -// otherwise. -func (s PluginMetaSet) Has(p PluginMeta) bool { - _, ok := s[p] - return ok -} - -// Count returns the number of metas in the set -func (s PluginMetaSet) Count() int { - return len(s) -} - -// ValidateVersions returns two new PluginMetaSets, separating those with -// versions that have syntax-valid semver versions from those that don't. -// -// Eliminating invalid versions from consideration (and possibly warning about -// them) is usually the first step of working with a meta set after discovery -// has completed. -func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { - valid = make(PluginMetaSet) - invalid = make(PluginMetaSet) - for p := range s { - if _, err := p.Version.Parse(); err == nil { - valid.Add(p) - } else { - invalid.Add(p) - } - } - return -} - -// WithName returns the subset of metas that have the given name. -func (s PluginMetaSet) WithName(name string) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - if p.Name == name { - ns.Add(p) - } - } - return ns -} - -// WithVersion returns the subset of metas that have the given version. -// -// This should be used only with the "valid" result from ValidateVersions; -// it will ignore any plugin metas that have invalid version strings. -func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { - ns := make(PluginMetaSet) - for p := range s { - gotVersion, err := p.Version.Parse() - if err != nil { - continue - } - if gotVersion.Equal(version) { - ns.Add(p) - } - } - return ns -} - -// ByName groups the metas in the set by their Names, returning a map. -func (s PluginMetaSet) ByName() map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - ret[p.Name].Add(p) - } - return ret -} - -// Newest returns the one item from the set that has the newest Version value. -// -// The result is meaningful only if the set is already filtered such that -// all of the metas have the same Name. -// -// If there isn't at least one meta in the set then this function will panic. -// Use Count() to ensure that there is at least one value before calling. 
-// -// If any of the metas have invalid version strings then this function will -// panic. Use ValidateVersions() first to filter out metas with invalid -// versions. -// -// If two metas have the same Version then one is arbitrarily chosen. This -// situation should be avoided by pre-filtering the set. -func (s PluginMetaSet) Newest() PluginMeta { - if len(s) == 0 { - panic("can't call NewestStable on empty PluginMetaSet") - } - - var first = true - var winner PluginMeta - var winnerVersion Version - for p := range s { - version, err := p.Version.Parse() - if err != nil { - panic(err) - } - - if first == true || version.NewerThan(winnerVersion) { - winner = p - winnerVersion = version - first = false - } - } - - return winner -} - -// ConstrainVersions takes a set of requirements and attempts to -// return a map from name to a set of metas that have the matching -// name and an appropriate version. -// -// If any of the given requirements match *no* plugins then its PluginMetaSet -// in the returned map will be empty. -// -// All viable metas are returned, so the caller can apply any desired filtering -// to reduce down to a single option. For example, calling Newest() to obtain -// the highest available version. -// -// If any of the metas in the set have invalid version strings then this -// function will panic. Use ValidateVersions() first to filter out metas with -// invalid versions. -func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet { - ret := make(map[string]PluginMetaSet) - for p := range s { - name := p.Name - allowedVersions, ok := reqd[name] - if !ok { - continue - } - if _, ok := ret[p.Name]; !ok { - ret[p.Name] = make(PluginMetaSet) - } - version, err := p.Version.Parse() - if err != nil { - panic(err) - } - if allowedVersions.Allows(version) { - ret[p.Name].Add(p) - } - } - return ret -} - -// OverridePaths returns a new set where any existing plugins with the given -// names are removed and replaced with the single path given in the map. -// -// This is here only to continue to support the legacy way of overriding -// plugin binaries in the .terraformrc file. It treats all given plugins -// as pre-versioning (version 0.0.0). This mechanism will eventually be -// phased out, with vendor directories being the intended replacement. 
-func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet { - ret := make(PluginMetaSet) - for p := range s { - if _, ok := paths[p.Name]; ok { - // Skip plugins that we're overridding - continue - } - - ret.Add(p) - } - - // Now add the metadata for overriding plugins - for name, path := range paths { - ret.Add(PluginMeta{ - Name: name, - Version: VersionZero, - Path: path, - }) - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go deleted file mode 100644 index 0466ab25..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go +++ /dev/null @@ -1,111 +0,0 @@ -package discovery - -import ( - "bytes" -) - -// PluginInstallProtocolVersion is the protocol version TF-core -// supports to communicate with servers, and is used to resolve -// plugin discovery with terraform registry, in addition to -// any specified plugin version constraints -const PluginInstallProtocolVersion = 5 - -// PluginRequirements describes a set of plugins (assumed to be of a consistent -// kind) that are required to exist and have versions within the given -// corresponding sets. -type PluginRequirements map[string]*PluginConstraints - -// PluginConstraints represents an element of PluginRequirements describing -// the constraints for a single plugin. -type PluginConstraints struct { - // Specifies that the plugin's version must be within the given - // constraints. - Versions Constraints - - // If non-nil, the hash of the on-disk plugin executable must exactly - // match the SHA256 hash given here. - SHA256 []byte -} - -// Allows returns true if the given version is within the receiver's version -// constraints. -func (s *PluginConstraints) Allows(v Version) bool { - return s.Versions.Allows(v) -} - -// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, -// either because it matches the constraint or because there is no such -// constraint. -func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { - if s.SHA256 == nil { - return true - } - return bytes.Equal(s.SHA256, digest) -} - -// Merge takes the contents of the receiver and the other given requirements -// object and merges them together into a single requirements structure -// that satisfies both sets of requirements. -// -// Note that it doesn't make sense to merge two PluginRequirements with -// differing required plugin SHA256 hashes, since the result will never -// match any plugin. -func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { - ret := make(PluginRequirements) - for n, c := range r { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - for n, c := range other { - if existing, exists := ret[n]; exists { - ret[n].Versions = ret[n].Versions.Append(c.Versions) - - if existing.SHA256 != nil { - if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { - // If we've been asked to merge two constraints with - // different SHA256 hashes then we'll produce a dummy value - // that can never match anything. This is a silly edge case - // that no reasonable caller should hit. 
- ret[n].SHA256 = []byte(invalidProviderHash) - } - } else { - ret[n].SHA256 = c.SHA256 // might still be nil - } - } else { - ret[n] = &PluginConstraints{ - Versions: Constraints{}.Append(c.Versions), - SHA256: c.SHA256, - } - } - } - return ret -} - -// LockExecutables applies additional constraints to the receiver that -// require plugin executables with specific SHA256 digests. This modifies -// the receiver in-place, since it's intended to be applied after -// version constraints have been resolved. -// -// The given map must include a key for every plugin that is already -// required. If not, any missing keys will cause the corresponding plugin -// to never match, though the direct caller doesn't necessarily need to -// guarantee this as long as the downstream code _applying_ these constraints -// is able to deal with the non-match in some way. -func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { - for name, cons := range r { - digest := sha256s[name] - - if digest == nil { - // Prevent any match, which will then presumably cause the - // downstream consumer of this requirements to report an error. - cons.SHA256 = []byte(invalidProviderHash) - continue - } - - cons.SHA256 = digest - } -} - -const invalidProviderHash = "" diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go deleted file mode 100644 index 7bbae50c..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go +++ /dev/null @@ -1,19 +0,0 @@ -package discovery - -import ( - "bytes" - "strings" - - "golang.org/x/crypto/openpgp" -) - -// Verify the data using the provided openpgp detached signature and the -// embedded hashicorp public key. -func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { - el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) - if err != nil { - return nil, err - } - - return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go deleted file mode 100644 index 4311d510..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go +++ /dev/null @@ -1,77 +0,0 @@ -package discovery - -import ( - "fmt" - "sort" - - version "github.com/hashicorp/go-version" -) - -const VersionZero = "0.0.0" - -// A VersionStr is a string containing a possibly-invalid representation -// of a semver version number. Call Parse on it to obtain a real Version -// object, or discover that it is invalid. -type VersionStr string - -// Parse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then an error is returned instead. -func (s VersionStr) Parse() (Version, error) { - raw, err := version.NewVersion(string(s)) - if err != nil { - return Version{}, err - } - return Version{raw}, nil -} - -// MustParse transforms a VersionStr into a Version if it is -// syntactically valid. If it isn't then it panics. -func (s VersionStr) MustParse() Version { - ret, err := s.Parse() - if err != nil { - panic(err) - } - return ret -} - -// Version represents a version number that has been parsed from -// a semver string and known to be valid. -type Version struct { - // We wrap this here just because it avoids a proliferation of - // direct go-version imports all over the place, and keeps the - // version-processing details within this package. 
- raw *version.Version -} - -func (v Version) String() string { - return v.raw.String() -} - -func (v Version) NewerThan(other Version) bool { - return v.raw.GreaterThan(other.raw) -} - -func (v Version) Equal(other Version) bool { - return v.raw.Equal(other.raw) -} - -// IsPrerelease determines if version is a prerelease -func (v Version) IsPrerelease() bool { - return v.raw.Prerelease() != "" -} - -// MinorUpgradeConstraintStr returns a ConstraintStr that would permit -// minor upgrades relative to the receiving version. -func (v Version) MinorUpgradeConstraintStr() ConstraintStr { - segments := v.raw.Segments() - return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) -} - -type Versions []Version - -// Sort sorts version from newest to oldest. -func (v Versions) Sort() { - sort.Slice(v, func(i, j int) bool { - return v[i].NewerThan(v[j]) - }) -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go deleted file mode 100644 index de02f5ec..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go +++ /dev/null @@ -1,89 +0,0 @@ -package discovery - -import ( - "sort" - - version "github.com/hashicorp/go-version" -) - -// A ConstraintStr is a string containing a possibly-invalid representation -// of a version constraint provided in configuration. Call Parse on it to -// obtain a real Constraint object, or discover that it is invalid. -type ConstraintStr string - -// Parse transforms a ConstraintStr into a Constraints if it is -// syntactically valid. If it isn't then an error is returned instead. -func (s ConstraintStr) Parse() (Constraints, error) { - raw, err := version.NewConstraint(string(s)) - if err != nil { - return Constraints{}, err - } - return Constraints{raw}, nil -} - -// MustParse is like Parse but it panics if the constraint string is invalid. -func (s ConstraintStr) MustParse() Constraints { - ret, err := s.Parse() - if err != nil { - panic(err) - } - return ret -} - -// Constraints represents a set of versions which any given Version is either -// a member of or not. -type Constraints struct { - raw version.Constraints -} - -// NewConstraints creates a Constraints based on a version.Constraints. -func NewConstraints(c version.Constraints) Constraints { - return Constraints{c} -} - -// AllVersions is a Constraints containing all versions -var AllVersions Constraints - -func init() { - AllVersions = Constraints{ - raw: make(version.Constraints, 0), - } -} - -// Allows returns true if the given version permitted by the receiving -// constraints set. -func (s Constraints) Allows(v Version) bool { - return s.raw.Check(v.raw) -} - -// Append combines the receiving set with the given other set to produce -// a set that is the intersection of both sets, which is to say that resulting -// constraints contain only the versions that are members of both. -func (s Constraints) Append(other Constraints) Constraints { - raw := make(version.Constraints, 0, len(s.raw)+len(other.raw)) - - // Since "raw" is a list of constraints that remove versions from the set, - // "Intersection" is implemented by concatenating together those lists, - // thus leaving behind only the versions not removed by either list. - raw = append(raw, s.raw...) - raw = append(raw, other.raw...) 
- - // while the set is unordered, we sort these lexically for consistent output - sort.Slice(raw, func(i, j int) bool { - return raw[i].String() < raw[j].String() - }) - - return Constraints{raw} -} - -// String returns a string representation of the set members as a set -// of range constraints. -func (s Constraints) String() string { - return s.raw.String() -} - -// Unconstrained returns true if and only if the receiver is an empty -// constraint set. -func (s Constraints) Unconstrained() bool { - return len(s.raw) == 0 -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go deleted file mode 100644 index 1abdbe29..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go +++ /dev/null @@ -1,563 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - plugin "github.com/hashicorp/go-plugin" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/providers" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. -type GRPCProviderPlugin struct { - plugin.Plugin - GRPCProvider func() proto.ProviderServer -} - -func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvider{ - client: proto.NewProviderClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProviderServer(s, p.GRPCProvider()) - return nil -} - -// GRPCProvider handles the client, or core side of the plugin rpc connection. -// The GRPCProvider methods are mostly a translation layer between the -// terraform provioders types and the grpc proto types, directly converting -// between the two. -type GRPCProvider struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - // TestServer contains a grpc.Server to close when the GRPCProvider is being - // used in an end to end test of a provider. - TestServer *grpc.Server - - // Proto client use to make the grpc service calls. - client proto.ProviderClient - - // this context is created by the plugin package, and is canceled when the - // plugin process ends. - ctx context.Context - - // schema stores the schema for this provider. This is used to properly - // serialize the state for requests. - mu sync.Mutex - schemas providers.GetSchemaResponse -} - -// getSchema is used internally to get the saved provider schema. The schema -// should have already been fetched from the provider, but we have to -// synchronize access to avoid being called concurrently with GetSchema. -func (p *GRPCProvider) getSchema() providers.GetSchemaResponse { - p.mu.Lock() - // unlock inline in case GetSchema needs to be called - if p.schemas.Provider.Block != nil { - p.mu.Unlock() - return p.schemas - } - p.mu.Unlock() - - // the schema should have been fetched already, but give it another shot - // just in case things are being called out of order. This may happen for - // tests. 
- schemas := p.GetSchema() - if schemas.Diagnostics.HasErrors() { - panic(schemas.Diagnostics.Err()) - } - - return schemas -} - -// getResourceSchema is a helper to extract the schema for a resource, and -// panics if the schema is not available. -func (p *GRPCProvider) getResourceSchema(name string) providers.Schema { - schema := p.getSchema() - resSchema, ok := schema.ResourceTypes[name] - if !ok { - panic("unknown resource type " + name) - } - return resSchema -} - -// gettDatasourceSchema is a helper to extract the schema for a datasource, and -// panics if that schema is not available. -func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema { - schema := p.getSchema() - dataSchema, ok := schema.DataSources[name] - if !ok { - panic("unknown data source " + name) - } - return dataSchema -} - -func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) { - log.Printf("[TRACE] GRPCProvider: GetSchema") - p.mu.Lock() - defer p.mu.Unlock() - - if p.schemas.Provider.Block != nil { - return p.schemas - } - - resp.ResourceTypes = make(map[string]providers.Schema) - resp.DataSources = make(map[string]providers.Schema) - - // Some providers may generate quite large schemas, and the internal default - // grpc response size limit is 4MB. 64MB should cover most any use case, and - // if we get providers nearing that we may want to consider a finer-grained - // API to fetch individual resource schemas. - // Note: this option is marked as EXPERIMENTAL in the grpc API. - const maxRecvSize = 64 << 20 - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if protoResp.Provider == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) - return resp - } - - resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) - - for name, res := range protoResp.ResourceSchemas { - resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) - } - - for name, data := range protoResp.DataSourceSchemas { - resp.DataSources[name] = convert.ProtoToProviderSchema(data) - } - - p.schemas = resp - - return resp -} - -func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { - log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig") - - schema := p.getSchema() - ty := schema.Provider.Block.ImpliedType() - - mp, err := msgpack.Marshal(r.Config, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PrepareProviderConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - config := cty.NullVal(ty) - if protoResp.PreparedConfig != nil { - config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.PreparedConfig = config - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) 
{ - log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig") - resourceSchema := p.getResourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateResourceTypeConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { - log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") - - dataSchema := p.getDatasourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateDataSourceConfig_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{Msgpack: mp}, - } - - protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { - log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") - - resSchema := p.getResourceSchema(r.TypeName) - - protoReq := &proto.UpgradeResourceState_Request{ - TypeName: r.TypeName, - Version: int64(r.Version), - RawState: &proto.RawState{ - Json: r.RawStateJSON, - Flatmap: r.RawStateFlatmap, - }, - } - - protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.UpgradedState != nil { - state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - - resp.UpgradedState = state - return resp -} - -func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { - log.Printf("[TRACE] GRPCProvider: Configure") - - schema := p.getSchema() - - var mp []byte - - // we don't have anything to marshal if there's no config - mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.Configure_Request{ - TerraformVersion: r.TerraformVersion, - Config: &proto.DynamicValue{ - Msgpack: mp, - }, - } - - protoResp, err := p.client.Configure(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvider) Stop() error { - log.Printf("[TRACE] GRPCProvider: Stop") - - resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) - if err != nil { - return err - } - - if resp.Error != "" 
{ - return errors.New(resp.Error) - } - return nil -} - -func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { - log.Printf("[TRACE] GRPCProvider: ReadResource") - - resSchema := p.getResourceSchema(r.TypeName) - - mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadResource_Request{ - TypeName: r.TypeName, - CurrentState: &proto.DynamicValue{Msgpack: mp}, - Private: r.Private, - } - - protoResp, err := p.client.ReadResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.NewState != nil { - state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.NewState = state - resp.Private = protoResp.Private - - return resp -} - -func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { - log.Printf("[TRACE] GRPCProvider: PlanResourceChange") - - resSchema := p.getResourceSchema(r.TypeName) - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.PlanResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, - PriorPrivate: r.PriorPrivate, - } - - protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.PlannedState != nil { - state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.PlannedState = state - - for _, p := range protoResp.RequiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) - } - - resp.PlannedPrivate = protoResp.PlannedPrivate - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { - log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") - - resSchema := p.getResourceSchema(r.TypeName) - - priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) 
- return resp - } - configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ApplyResourceChange_Request{ - TypeName: r.TypeName, - PriorState: &proto.DynamicValue{Msgpack: priorMP}, - PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, - Config: &proto.DynamicValue{Msgpack: configMP}, - PlannedPrivate: r.PlannedPrivate, - } - - protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - resp.Private = protoResp.Private - - state := cty.NullVal(resSchema.Block.ImpliedType()) - if protoResp.NewState != nil { - state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.NewState = state - - resp.LegacyTypeSystem = protoResp.LegacyTypeSystem - - return resp -} - -func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { - log.Printf("[TRACE] GRPCProvider: ImportResourceState") - - protoReq := &proto.ImportResourceState_Request{ - TypeName: r.TypeName, - Id: r.ID, - } - - protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - for _, imported := range protoResp.ImportedResources { - resource := providers.ImportedResource{ - TypeName: imported.TypeName, - Private: imported.Private, - } - - resSchema := p.getResourceSchema(resource.TypeName) - state := cty.NullVal(resSchema.Block.ImpliedType()) - if imported.State != nil { - state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resource.State = state - resp.ImportedResources = append(resp.ImportedResources, resource) - } - - return resp -} - -func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { - log.Printf("[TRACE] GRPCProvider: ReadDataSource") - - dataSchema := p.getDatasourceSchema(r.TypeName) - - config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ReadDataSource_Request{ - TypeName: r.TypeName, - Config: &proto.DynamicValue{ - Msgpack: config, - }, - } - - protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - state := cty.NullVal(dataSchema.Block.ImpliedType()) - if protoResp.State != nil { - state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - } - resp.State = state - - return resp -} - -// closing the grpc connection is final, and terraform will call it at the end of every phase. 
-func (p *GRPCProvider) Close() error { - log.Printf("[TRACE] GRPCProvider: Close") - - // Make sure to stop the server if we're not running within go-plugin. - if p.TestServer != nil { - p.TestServer.Stop() - } - - // Check this since it's not automatically inserted during plugin creation. - // It's currently only inserted by the command package, because that is - // where the factory is built and is the only point with access to the - // plugin.Client. - if p.PluginClient == nil { - log.Println("[DEBUG] provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go deleted file mode 100644 index 136c88d6..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go +++ /dev/null @@ -1,178 +0,0 @@ -package plugin - -import ( - "context" - "errors" - "io" - "log" - "sync" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/configs/configschema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/provisioners" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/msgpack" - "google.golang.org/grpc" -) - -// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. -type GRPCProvisionerPlugin struct { - plugin.Plugin - GRPCProvisioner func() proto.ProvisionerServer -} - -func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCProvisioner{ - client: proto.NewProvisionerClient(c), - ctx: ctx, - }, nil -} - -func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) - return nil -} - -// provisioners.Interface grpc implementation -type GRPCProvisioner struct { - // PluginClient provides a reference to the plugin.Client which controls the plugin process. - // This allows the GRPCProvider a way to shutdown the plugin process. - PluginClient *plugin.Client - - client proto.ProvisionerClient - ctx context.Context - - // Cache the schema since we need it for serialization in each method call. 
- mu sync.Mutex - schema *configschema.Block -} - -func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { - p.mu.Lock() - defer p.mu.Unlock() - - if p.schema != nil { - return provisioners.GetSchemaResponse{ - Provisioner: p.schema, - } - } - - protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - - if protoResp.Provisioner == nil { - resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) - return resp - } - - resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) - - p.schema = resp.Provisioner - - return resp -} - -func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { - schema := p.GetSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) - return resp - } - - mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ValidateProvisionerConfig_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - } - protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) - return resp -} - -func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - schema := p.GetSchema() - if schema.Diagnostics.HasErrors() { - resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) - return resp - } - - mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - // connection is always assumed to be a simple string map - connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - protoReq := &proto.ProvisionResource_Request{ - Config: &proto.DynamicValue{Msgpack: mp}, - Connection: &proto.DynamicValue{Msgpack: connMP}, - } - - outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - - for { - rcv, err := outputClient.Recv() - if rcv != nil { - r.UIOutput.Output(rcv.Output) - } - if err != nil { - if err != io.EOF { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - break - } - - if len(rcv.Diagnostics) > 0 { - resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) - break - } - } - - return resp -} - -func (p *GRPCProvisioner) Stop() error { - protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) - if err != nil { - return err - } - if protoResp.Error != "" { - return errors.New(protoResp.Error) - } - return nil -} - -func (p *GRPCProvisioner) Close() error { - // check this since it's not automatically inserted during plugin creation - if p.PluginClient == nil { - log.Println("[DEBUG] provider has no plugin.Client") - return nil - } - - p.PluginClient.Kill() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go 
b/vendor/github.com/hashicorp/terraform/plugin/plugin.go deleted file mode 100644 index e4fb5776..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/plugin.go +++ /dev/null @@ -1,14 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" -) - -// See serve.go for serving plugins - -var VersionedPlugins = map[int]plugin.PluginSet{ - 5: { - "provider": &GRPCProviderPlugin{}, - "provisioner": &GRPCProvisionerPlugin{}, - }, -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go deleted file mode 100644 index 459661a5..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go +++ /dev/null @@ -1,620 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// ResourceProviderPlugin is the plugin.Plugin implementation. -type ResourceProviderPlugin struct { - ResourceProvider func() terraform.ResourceProvider -} - -func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{ - Broker: b, - Provider: p.ResourceProvider(), - }, nil -} - -func (p *ResourceProviderPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvider{Broker: b, Client: c}, nil -} - -// ResourceProvider is an implementation of terraform.ResourceProvider -// that communicates over RPC. -type ResourceProvider struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvider) Stop() error { - var resp ResourceProviderStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - var result ResourceProviderGetSchemaResponse - args := &ResourceProviderGetSchemaArgs{ - Req: req, - } - - err := p.Client.Call("Plugin.GetSchema", args, &result) - if err != nil { - return nil, err - } - - if result.Error != nil { - err = result.Error - } - - return result.Schema, err -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIInputServer{ - UIInput: input, - }) - - var resp ResourceProviderInputResponse - args := ResourceProviderInputArgs{ - InputId: id, - Config: c, - } - - err := p.Client.Call("Plugin.Input", &args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - return nil, err - } - - return resp.Config, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResponse - args := ResourceProviderValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateResource", &args, &resp) - if err 
!= nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - var resp ResourceProviderConfigureResponse - err := p.Client.Call("Plugin.Configure", c, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderApplyResponse - args := &ResourceProviderApplyArgs{ - Info: info, - State: s, - Diff: d, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderDiffResponse - args := &ResourceProviderDiffArgs{ - Info: info, - State: s, - Config: c, - } - err := p.Client.Call("Plugin.Diff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - var resp ResourceProviderRefreshResponse - args := &ResourceProviderRefreshArgs{ - Info: info, - State: s, - } - - err := p.Client.Call("Plugin.Refresh", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - var resp ResourceProviderImportStateResponse - args := &ResourceProviderImportStateArgs{ - Info: info, - Id: id, - } - - err := p.Client.Call("Plugin.ImportState", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - var result []terraform.ResourceType - - err := p.Client.Call("Plugin.Resources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? 
- return nil - } - - return result -} - -func (p *ResourceProvider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderReadDataDiffResponse - args := &ResourceProviderReadDataDiffArgs{ - Info: info, - Config: c, - } - - err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderReadDataApplyResponse - args := &ResourceProviderReadDataApplyArgs{ - Info: info, - Diff: d, - } - - err := p.Client.Call("Plugin.ReadDataApply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) DataSources() []terraform.DataSource { - var result []terraform.DataSource - - err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? - return nil - } - - return result -} - -func (p *ResourceProvider) Close() error { - return p.Client.Close() -} - -// ResourceProviderServer is a net/rpc compatible structure for serving -// a ResourceProvider. This should not be used directly. -type ResourceProviderServer struct { - Broker *plugin.MuxBroker - Provider terraform.ResourceProvider -} - -type ResourceProviderStopResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderGetSchemaArgs struct { - Req *terraform.ProviderSchemaRequest -} - -type ResourceProviderGetSchemaResponse struct { - Schema *terraform.ProviderSchema - Error *plugin.BasicError -} - -type ResourceProviderConfigureResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderInputArgs struct { - InputId uint32 - Config *terraform.ResourceConfig -} - -type ResourceProviderInputResponse struct { - Config *terraform.ResourceConfig - Error *plugin.BasicError -} - -type ResourceProviderApplyArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Diff *terraform.InstanceDiff -} - -type ResourceProviderApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderDiffArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProviderDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderRefreshArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState -} - -type ResourceProviderRefreshResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderImportStateArgs struct { - Info *terraform.InstanceInfo - Id string -} - -type ResourceProviderImportStateResponse struct { - State []*terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataApplyArgs struct { - Info *terraform.InstanceInfo - Diff *terraform.InstanceDiff -} - -type ResourceProviderReadDataApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataDiffArgs struct { - Info *terraform.InstanceInfo - Config *terraform.ResourceConfig -} - -type ResourceProviderReadDataDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderValidateArgs struct { - Config 
*terraform.ResourceConfig -} - -type ResourceProviderValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProviderValidateResourceArgs struct { - Config *terraform.ResourceConfig - Type string -} - -type ResourceProviderValidateResourceResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -func (s *ResourceProviderServer) Stop( - _ interface{}, - reply *ResourceProviderStopResponse) error { - err := s.Provider.Stop() - *reply = ResourceProviderStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) GetSchema( - args *ResourceProviderGetSchemaArgs, - result *ResourceProviderGetSchemaResponse, -) error { - schema, err := s.Provider.GetSchema(args.Req) - result.Schema = schema - if err != nil { - result.Error = plugin.NewBasicError(err) - } - return nil -} - -func (s *ResourceProviderServer) Input( - args *ResourceProviderInputArgs, - reply *ResourceProviderInputResponse) error { - conn, err := s.Broker.Dial(args.InputId) - if err != nil { - *reply = ResourceProviderInputResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - input := &UIInput{Client: client} - - config, err := s.Provider.Input(input, args.Config) - *reply = ResourceProviderInputResponse{ - Config: config, - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) Validate( - args *ResourceProviderValidateArgs, - reply *ResourceProviderValidateResponse) error { - warns, errs := s.Provider.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ValidateResource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateResource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) Configure( - config *terraform.ResourceConfig, - reply *ResourceProviderConfigureResponse) error { - err := s.Provider.Configure(config) - *reply = ResourceProviderConfigureResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Apply( - args *ResourceProviderApplyArgs, - result *ResourceProviderApplyResponse) error { - state, err := s.Provider.Apply(args.Info, args.State, args.Diff) - *result = ResourceProviderApplyResponse{ - State: state, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Diff( - args *ResourceProviderDiffArgs, - result *ResourceProviderDiffResponse) error { - diff, err := s.Provider.Diff(args.Info, args.State, args.Config) - *result = ResourceProviderDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Refresh( - args *ResourceProviderRefreshArgs, - result *ResourceProviderRefreshResponse) error { - newState, err := s.Provider.Refresh(args.Info, args.State) - *result = ResourceProviderRefreshResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ImportState( - args 
*ResourceProviderImportStateArgs, - result *ResourceProviderImportStateResponse) error { - states, err := s.Provider.ImportState(args.Info, args.Id) - *result = ResourceProviderImportStateResponse{ - State: states, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Resources( - nothing interface{}, - result *[]terraform.ResourceType) error { - *result = s.Provider.Resources() - return nil -} - -func (s *ResourceProviderServer) ValidateDataSource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ReadDataDiff( - args *ResourceProviderReadDataDiffArgs, - result *ResourceProviderReadDataDiffResponse) error { - diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) - *result = ResourceProviderReadDataDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ReadDataApply( - args *ResourceProviderReadDataApplyArgs, - result *ResourceProviderReadDataApplyResponse) error { - newState, err := s.Provider.ReadDataApply(args.Info, args.Diff) - *result = ResourceProviderReadDataApplyResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) DataSources( - nothing interface{}, - result *[]terraform.DataSource) error { - *result = s.Provider.DataSources() - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go deleted file mode 100644 index f0cc341f..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go +++ /dev/null @@ -1,182 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" -) - -// ResourceProvisionerPlugin is the plugin.Plugin implementation. -type ResourceProvisionerPlugin struct { - ResourceProvisioner func() terraform.ResourceProvisioner -} - -func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProvisionerServer{ - Broker: b, - Provisioner: p.ResourceProvisioner(), - }, nil -} - -func (p *ResourceProvisionerPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvisioner{Broker: b, Client: c}, nil -} - -// ResourceProvisioner is an implementation of terraform.ResourceProvisioner -// that communicates over RPC. 
-type ResourceProvisioner struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { - panic("not implemented") - return nil, nil -} - -func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProvisionerValidateResponse - args := ResourceProvisionerValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvisioner) Apply( - output terraform.UIOutput, - s *terraform.InstanceState, - c *terraform.ResourceConfig) error { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIOutputServer{ - UIOutput: output, - }) - - var resp ResourceProvisionerApplyResponse - args := &ResourceProvisionerApplyArgs{ - OutputId: id, - State: s, - Config: c, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvisioner) Stop() error { - var resp ResourceProvisionerStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvisioner) Close() error { - return p.Client.Close() -} - -type ResourceProvisionerValidateArgs struct { - Config *terraform.ResourceConfig -} - -type ResourceProvisionerValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProvisionerApplyArgs struct { - OutputId uint32 - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProvisionerApplyResponse struct { - Error *plugin.BasicError -} - -type ResourceProvisionerStopResponse struct { - Error *plugin.BasicError -} - -// ResourceProvisionerServer is a net/rpc compatible structure for serving -// a ResourceProvisioner. This should not be used directly. 
-type ResourceProvisionerServer struct { - Broker *plugin.MuxBroker - Provisioner terraform.ResourceProvisioner -} - -func (s *ResourceProvisionerServer) Apply( - args *ResourceProvisionerApplyArgs, - result *ResourceProvisionerApplyResponse) error { - conn, err := s.Broker.Dial(args.OutputId) - if err != nil { - *result = ResourceProvisionerApplyResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - output := &UIOutput{Client: client} - - err = s.Provisioner.Apply(output, args.State, args.Config) - *result = ResourceProvisionerApplyResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProvisionerServer) Validate( - args *ResourceProvisionerValidateArgs, - reply *ResourceProvisionerValidateResponse) error { - warns, errs := s.Provisioner.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProvisionerValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProvisionerServer) Stop( - _ interface{}, - reply *ResourceProvisionerStopResponse) error { - err := s.Provisioner.Stop() - *reply = ResourceProvisionerStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go deleted file mode 100644 index 8d056c59..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/serve.go +++ /dev/null @@ -1,121 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/go-plugin" - grpcplugin "github.com/hashicorp/terraform/helper/plugin" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/terraform" -) - -const ( - // The constants below are the names of the plugins that can be dispensed - // from the plugin server. - ProviderPluginName = "provider" - ProvisionerPluginName = "provisioner" - - // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify - // a particular version during their handshake. This is the version used when Terraform 0.10 - // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must - // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and - // 0.11. - DefaultProtocolVersion = 4 -) - -// Handshake is the HandshakeConfig used to configure clients and servers. -var Handshake = plugin.HandshakeConfig{ - // The ProtocolVersion is the version that must match between TF core - // and TF plugins. This should be bumped whenever a change happens in - // one or the other that makes it so that they can't safely communicate. - // This could be adding a new interface value, it could be how - // helper/schema computes diffs, etc. - ProtocolVersion: DefaultProtocolVersion, - - // The magic cookie values should NEVER be changed. - MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", - MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", -} - -type ProviderFunc func() terraform.ResourceProvider -type ProvisionerFunc func() terraform.ResourceProvisioner -type GRPCProviderFunc func() proto.ProviderServer -type GRPCProvisionerFunc func() proto.ProvisionerServer - -// ServeOpts are the configurations to serve a plugin. 
-type ServeOpts struct { - ProviderFunc ProviderFunc - ProvisionerFunc ProvisionerFunc - - // Wrapped versions of the above plugins will automatically shimmed and - // added to the GRPC functions when possible. - GRPCProviderFunc GRPCProviderFunc - GRPCProvisionerFunc GRPCProvisionerFunc -} - -// Serve serves a plugin. This function never returns and should be the final -// function called in the main function of the plugin. -func Serve(opts *ServeOpts) { - // since the plugins may not yet be aware of the new protocol, we - // automatically wrap the plugins in the grpc shims. - if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { - provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) - // this is almost always going to be a *schema.Provider, but check that - // we got back a valid provider just in case. - if provider != nil { - opts.GRPCProviderFunc = func() proto.ProviderServer { - return provider - } - } - } - if opts.GRPCProvisionerFunc == nil && opts.ProvisionerFunc != nil { - provisioner := grpcplugin.NewGRPCProvisionerServerShim(opts.ProvisionerFunc()) - if provisioner != nil { - opts.GRPCProvisionerFunc = func() proto.ProvisionerServer { - return provisioner - } - } - } - - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - VersionedPlugins: pluginSet(opts), - GRPCServer: plugin.DefaultGRPCServer, - }) -} - -// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring -// a plugin server or client. -func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin { - return map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{ - ResourceProvider: opts.ProviderFunc, - }, - "provisioner": &ResourceProvisionerPlugin{ - ResourceProvisioner: opts.ProvisionerFunc, - }, - } -} - -func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { - // Set the legacy netrpc plugins at version 4. - // The oldest version is returned in when executed by a legacy go-plugin - // client. - plugins := map[int]plugin.PluginSet{ - 4: legacyPluginMap(opts), - } - - // add the new protocol versions if they're configured - if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil { - plugins[5] = plugin.PluginSet{} - if opts.GRPCProviderFunc != nil { - plugins[5]["provider"] = &GRPCProviderPlugin{ - GRPCProvider: opts.GRPCProviderFunc, - } - } - if opts.GRPCProvisionerFunc != nil { - plugins[5]["provisioner"] = &GRPCProvisionerPlugin{ - GRPCProvisioner: opts.GRPCProvisionerFunc, - } - } - } - return plugins -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go deleted file mode 100644 index 3469e6a9..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go +++ /dev/null @@ -1,52 +0,0 @@ -package plugin - -import ( - "context" - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// UIInput is an implementation of terraform.UIInput that communicates -// over RPC. -type UIInput struct { - Client *rpc.Client -} - -func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { - var resp UIInputInputResponse - err := i.Client.Call("Plugin.Input", opts, &resp) - if err != nil { - return "", err - } - if resp.Error != nil { - err = resp.Error - return "", err - } - - return resp.Value, nil -} - -type UIInputInputResponse struct { - Value string - Error *plugin.BasicError -} - -// UIInputServer is a net/rpc compatible structure for serving -// a UIInputServer. 
This should not be used directly. -type UIInputServer struct { - UIInput terraform.UIInput -} - -func (s *UIInputServer) Input( - opts *terraform.InputOpts, - reply *UIInputInputResponse) error { - value, err := s.UIInput.Input(context.Background(), opts) - *reply = UIInputInputResponse{ - Value: value, - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go deleted file mode 100644 index c222b00c..00000000 --- a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go +++ /dev/null @@ -1,29 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/terraform/terraform" -) - -// UIOutput is an implementation of terraform.UIOutput that communicates -// over RPC. -type UIOutput struct { - Client *rpc.Client -} - -func (o *UIOutput) Output(v string) { - o.Client.Call("Plugin.Output", v, new(interface{})) -} - -// UIOutputServer is the RPC server for serving UIOutput. -type UIOutputServer struct { - UIOutput terraform.UIOutput -} - -func (s *UIOutputServer) Output( - v string, - reply *interface{}) error { - s.UIOutput.Output(v) - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go deleted file mode 100644 index 46bb80b6..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go +++ /dev/null @@ -1,47 +0,0 @@ -package providers - -import ( - "sort" - - "github.com/hashicorp/terraform/addrs" -) - -// AddressedTypes is a helper that extracts all of the distinct provider -// types from the given list of relative provider configuration addresses. -func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]struct{}{} - for _, addr := range providerAddrs { - m[addr.Type.LegacyString()] = struct{}{} - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - return names -} - -// AddressedTypesAbs is a helper that extracts all of the distinct provider -// types from the given list of absolute provider configuration addresses. -func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string { - if len(providerAddrs) == 0 { - return nil - } - m := map[string]struct{}{} - for _, addr := range providerAddrs { - m[addr.ProviderConfig.Type.LegacyString()] = struct{}{} - } - - names := make([]string, 0, len(m)) - for typeName := range m { - names = append(names, typeName) - } - - sort.Strings(names) // Stable result for tests - return names -} diff --git a/vendor/github.com/hashicorp/terraform/providers/doc.go b/vendor/github.com/hashicorp/terraform/providers/doc.go deleted file mode 100644 index 39aa1de6..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package providers contains the interface and primary types required to -// implement a Terraform resource provider. 
-package providers diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go deleted file mode 100644 index 7e0a74c5..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/provider.go +++ /dev/null @@ -1,359 +0,0 @@ -package providers - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// Interface represents the set of methods required for a complete resource -// provider plugin. -type Interface interface { - // GetSchema returns the complete schema for the provider. - GetSchema() GetSchemaResponse - - // PrepareProviderConfig allows the provider to validate the configuration - // values, and set or override any values with defaults. - PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse - - // ValidateResourceTypeConfig allows the provider to validate the resource - // configuration values. - ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse - - // ValidateDataSource allows the provider to validate the data source - // configuration values. - ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse - - // UpgradeResourceState is called when the state loader encounters an - // instance state whose schema version is less than the one reported by the - // currently-used version of the corresponding provider, and the upgraded - // result is used for any further processing. - UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse - - // Configure configures and initialized the provider. - Configure(ConfigureRequest) ConfigureResponse - - // Stop is called when the provider should halt any in-flight actions. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provider after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // ReadResource refreshes a resource and returns its current state. - ReadResource(ReadResourceRequest) ReadResourceResponse - - // PlanResourceChange takes the current state and proposed state of a - // resource, and returns the planned final state. - PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse - - // ApplyResourceChange takes the planned state for a resource, which may - // yet contain unknown computed values, and applies the changes returning - // the final state. - ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse - - // ImportResourceState requests that the given resource be imported. - ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse - - // ReadDataSource returns the data source's current state. - ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetSchemaResponse struct { - // Provider is the schema for the provider itself. - Provider Schema - - // ResourceTypes map the resource type name to that type's schema. 
- ResourceTypes map[string]Schema - - // DataSources maps the data source name to that data source's schema. - DataSources map[string]Schema - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// Schema pairs a provider or resource schema with that schema's version. -// This is used to be able to upgrade the schema in UpgradeResourceState. -type Schema struct { - Version int64 - Block *configschema.Block -} - -type PrepareProviderConfigRequest struct { - // Config is the raw configuration value for the provider. - Config cty.Value -} - -type PrepareProviderConfigResponse struct { - // PreparedConfig is the configuration as prepared by the provider. - PreparedConfig cty.Value - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ValidateResourceTypeConfigRequest struct { - // TypeName is the name of the resource type to validate. - TypeName string - - // Config is the configuration value to validate, which may contain unknown - // values. - Config cty.Value -} - -type ValidateResourceTypeConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ValidateDataSourceConfigRequest struct { - // TypeName is the name of the data source type to validate. - TypeName string - - // Config is the configuration value to validate, which may contain unknown - // values. - Config cty.Value -} - -type ValidateDataSourceConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type UpgradeResourceStateRequest struct { - // TypeName is the name of the resource type being upgraded - TypeName string - - // Version is the version of the schema that created the current state. - Version int64 - - // RawStateJSON and RawStateFlatmap contain the state that needs to be - // upgraded to match the current schema version. Because the schema is - // unknown, this contains only the raw data as stored in the state. - // RawStateJSON is the current json state encoding. - // RawStateFlatmap is the legacy flatmap encoding. - // Only one of these fields may be set for the upgrade request. - RawStateJSON []byte - RawStateFlatmap map[string]string -} - -type UpgradeResourceStateResponse struct { - // UpgradedState is the newly upgraded resource state. - UpgradedState cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ConfigureRequest struct { - // Terraform version is the version string from the running instance of - // terraform. Providers can use TerraformVersion to verify compatibility, - // and to store for informational purposes. - TerraformVersion string - - // Config is the complete configuration value for the provider. - Config cty.Value -} - -type ConfigureResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ReadResourceRequest struct { - // TypeName is the name of the resource type being read. - TypeName string - - // PriorState contains the previously saved state value for this resource. - PriorState cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -type ReadResourceResponse struct { - // NewState contains the current state of the resource. 
- NewState cty.Value - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -type PlanResourceChangeRequest struct { - // TypeName is the name of the resource type to plan. - TypeName string - - // PriorState is the previously saved state value for this resource. - PriorState cty.Value - - // ProposedNewState is the expected state after the new configuration is - // applied. This is created by directly applying the configuration to the - // PriorState. The provider is then responsible for applying any further - // changes required to create the proposed final state. - ProposedNewState cty.Value - - // Config is the resource configuration, before being merged with the - // PriorState. Any value not explicitly set in the configuration will be - // null. Config is supplied for reference, but Provider implementations - // should prefer the ProposedNewState in most circumstances. - Config cty.Value - - // PriorPrivate is the previously saved private data returned from the - // provider during the last apply. - PriorPrivate []byte -} - -type PlanResourceChangeResponse struct { - // PlannedState is the expected state of the resource once the current - // configuration is applied. - PlannedState cty.Value - - // RequiresReplace is the list of the attributes that require - // resource replacement. - RequiresReplace []cty.Path - - // PlannedPrivate is an opaque blob that is not interpreted by terraform - // core. This will be saved and relayed back to the provider during - // ApplyResourceChange. - PlannedPrivate []byte - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics - - // LegacyTypeSystem is set only if the provider is using the legacy SDK - // whose type system cannot be precisely mapped into the Terraform type - // system. We use this to bypass certain consistency checks that would - // otherwise fail due to this imprecise mapping. No other provider or SDK - // implementation is permitted to set this. - LegacyTypeSystem bool -} - -type ApplyResourceChangeRequest struct { - // TypeName is the name of the resource type being applied. - TypeName string - - // PriorState is the current state of the resource. - PriorState cty.Value - - // PlannedState is the state returned from PlanResourceChange, and should - // represent the new state, minus any remaining computed attributes. - PlannedState cty.Value - - // Config is the resource configuration, before being merged with the - // PriorState. Any value not explicitly set in the configuration will be - // null. Config is supplied for reference, but Provider implementations - // should prefer the PlannedState in most circumstances. - Config cty.Value - - // PlannedPrivate is the same value as returned by PlanResourceChange. - PlannedPrivate []byte -} - -type ApplyResourceChangeResponse struct { - // NewState is the new complete state after applying the planned change. - // In the event of an error, NewState should represent the most recent - // known state of the resource, if it exists. - NewState cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte - - // Diagnostics contains any warnings or errors from the method call. 
- Diagnostics tfdiags.Diagnostics - - // LegacyTypeSystem is set only if the provider is using the legacy SDK - // whose type system cannot be precisely mapped into the Terraform type - // system. We use this to bypass certain consistency checks that would - // otherwise fail due to this imprecise mapping. No other provider or SDK - // implementation is permitted to set this. - LegacyTypeSystem bool -} - -type ImportResourceStateRequest struct { - // TypeName is the name of the resource type to be imported. - TypeName string - - // ID is a string with which the provider can identify the resource to be - // imported. - ID string -} - -type ImportResourceStateResponse struct { - // ImportedResources contains one or more state values related to the - // imported resource. It is not required that these be complete, only that - // there is enough identifying information for the provider to successfully - // update the states in ReadResource. - ImportedResources []ImportedResource - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// ImportedResource represents an object being imported into Terraform with the -// help of a provider. An ImportedObject is a RemoteObject that has been read -// by the provider's import handler but hasn't yet been committed to state. -type ImportedResource struct { - // TypeName is the name of the resource type associated with the - // returned state. It's possible for providers to import multiple related - // types with a single import request. - TypeName string - - // State is the state of the remote object being imported. This may not be - // complete, but must contain enough information to uniquely identify the - // resource. - State cty.Value - - // Private is an opaque blob that will be stored in state along with the - // resource. It is intended only for interpretation by the provider itself. - Private []byte -} - -// AsInstanceObject converts the receiving ImportedObject into a -// ResourceInstanceObject that has status ObjectReady. -// -// The returned object does not know its own resource type, so the caller must -// retain the ResourceType value from the source object if this information is -// needed. -// -// The returned object also has no dependency addresses, but the caller may -// freely modify the direct fields of the returned object without affecting -// the receiver. -func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { - return &states.ResourceInstanceObject{ - Status: states.ObjectReady, - Value: ir.State, - Private: ir.Private, - } -} - -type ReadDataSourceRequest struct { - // TypeName is the name of the data source type to Read. - TypeName string - - // Config is the complete configuration for the requested data source. - Config cty.Value -} - -type ReadDataSourceResponse struct { - // State is the current state of the requested data source. - State cty.Value - - // Diagnostics contains any warnings or errors from the method call. 
- Diagnostics tfdiags.Diagnostics -} diff --git a/vendor/github.com/hashicorp/terraform/providers/resolver.go b/vendor/github.com/hashicorp/terraform/providers/resolver.go deleted file mode 100644 index 2ef387e4..00000000 --- a/vendor/github.com/hashicorp/terraform/providers/resolver.go +++ /dev/null @@ -1,114 +0,0 @@ -package providers - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plugin/discovery" -) - -// Resolver is an interface implemented by objects that are able to resolve -// a given set of resource provider version constraints into Factory -// callbacks. -type Resolver interface { - // Given a constraint map, return a Factory for each requested provider. - // If some or all of the constraints cannot be satisfied, return a non-nil - // slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[addrs.Provider]Factory, []error) -} - -// ResolverFunc wraps a callback function and turns it into a Resolver -// implementation, for convenience in situations where a function and its -// associated closure are sufficient as a resolver implementation. -type ResolverFunc func(reqd discovery.PluginRequirements) (map[addrs.Provider]Factory, []error) - -// ResolveProviders implements Resolver by calling the -// wrapped function. -func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[addrs.Provider]Factory, []error) { - return f(reqd) -} - -// ResolverFixed returns a Resolver that has a fixed set of provider factories -// provided by the caller. The returned resolver ignores version constraints -// entirely and just returns the given factory for each requested provider -// name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResolverFixed(factories map[addrs.Provider]Factory) Resolver { - return ResolverFunc(func(reqd discovery.PluginRequirements) (map[addrs.Provider]Factory, []error) { - ret := make(map[addrs.Provider]Factory, len(reqd)) - var errs []error - for name := range reqd { - fqn := addrs.NewLegacyProvider(name) - if factory, exists := factories[fqn]; exists { - ret[fqn] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// Factory is a function type that creates a new instance of a resource -// provider, or returns an error if that is impossible. -type Factory func() (Interface, error) - -// FactoryFixed is a helper that creates a Factory that just returns some given -// single provider. -// -// Unlike usual factories, the exact same instance is returned for each call -// to the factory and so this must be used in only specialized situations where -// the caller can take care to either not mutate the given provider at all -// or to mutate it in ways that will not cause unexpected behavior for others -// holding the same reference. -func FactoryFixed(p Interface) Factory { - return func() (Interface, error) { - return p, nil - } -} - -// ProviderHasResource is a helper that requests schema from the given provider -// and checks if it has a resource type of the given name. -// -// This function is more expensive than it may first appear since it must -// retrieve the entire schema from the underlying provider, and so it should -// be used sparingly and especially not in tight loops. -// -// Since retrieving the provider may fail (e.g. 
if the provider is accessed -// over an RPC channel that has operational problems), this function will -// return false if the schema cannot be retrieved, under the assumption that -// a subsequent call to do anything with the resource type would fail -// anyway. -func ProviderHasResource(provider Interface, typeName string) bool { - resp := provider.GetSchema() - if resp.Diagnostics.HasErrors() { - return false - } - - _, exists := resp.ResourceTypes[typeName] - return exists -} - -// ProviderHasDataSource is a helper that requests schema from the given -// provider and checks if it has a data source of the given name. -// -// This function is more expensive than it may first appear since it must -// retrieve the entire schema from the underlying provider, and so it should -// be used sparingly and especially not in tight loops. -// -// Since retrieving the provider may fail (e.g. if the provider is accessed -// over an RPC channel that has operational problems), this function will -// return false if the schema cannot be retrieved, under the assumption that -// a subsequent call to do anything with the data source would fail -// anyway. -func ProviderHasDataSource(provider Interface, dataSourceName string) bool { - resp := provider.GetSchema() - if resp.Diagnostics.HasErrors() { - return false - } - - _, exists := resp.DataSources[dataSourceName] - return exists -} diff --git a/vendor/github.com/hashicorp/terraform/provisioners/doc.go b/vendor/github.com/hashicorp/terraform/provisioners/doc.go deleted file mode 100644 index b03ba9a1..00000000 --- a/vendor/github.com/hashicorp/terraform/provisioners/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package provisioners contains the interface and primary types to implement a -// Terraform resource provisioner. -package provisioners diff --git a/vendor/github.com/hashicorp/terraform/provisioners/factory.go b/vendor/github.com/hashicorp/terraform/provisioners/factory.go deleted file mode 100644 index 590b97a8..00000000 --- a/vendor/github.com/hashicorp/terraform/provisioners/factory.go +++ /dev/null @@ -1,19 +0,0 @@ -package provisioners - -// Factory is a function type that creates a new instance of a resource -// provisioner, or returns an error if that is impossible. -type Factory func() (Interface, error) - -// FactoryFixed is a helper that creates a Factory that just returns some given -// single provisioner. -// -// Unlike usual factories, the exact same instance is returned for each call -// to the factory and so this must be used in only specialized situations where -// the caller can take care to either not mutate the given provider at all -// or to mutate it in ways that will not cause unexpected behavior for others -// holding the same reference. -func FactoryFixed(p Interface) Factory { - return func() (Interface, error) { - return p, nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go deleted file mode 100644 index e53c8848..00000000 --- a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go +++ /dev/null @@ -1,82 +0,0 @@ -package provisioners - -import ( - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// Interface is the set of methods required for a resource provisioner plugin. -type Interface interface { - // GetSchema returns the schema for the provisioner configuration. 
- GetSchema() GetSchemaResponse - - // ValidateProvisionerConfig allows the provisioner to validate the - // configuration values. - ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse - - // ProvisionResource runs the provisioner with provided configuration. - // ProvisionResource blocks until the execution is complete. - // If the returned diagnostics contain any errors, the resource will be - // left in a tainted state. - ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse - - // Stop is called to interrupt the provisioner. - // - // Stop should not block waiting for in-flight actions to complete. It - // should take any action it wants and return immediately acknowledging it - // has received the stop request. Terraform will not make any further API - // calls to the provisioner after Stop is called. - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - // Close shuts down the plugin process if applicable. - Close() error -} - -type GetSchemaResponse struct { - // Provisioner contains the schema for this provisioner. - Provisioner *configschema.Block - - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -// UIOutput provides the Output method for resource provisioner -// plugins to write any output to the UI. -// -// Provisioners may call the Output method multiple times while Apply is in -// progress. It is invalid to call Output after Apply returns. -type UIOutput interface { - Output(string) -} - -type ValidateProvisionerConfigRequest struct { - // Config is the complete configuration to be used for the provisioner. - Config cty.Value -} - -type ValidateProvisionerConfigResponse struct { - // Diagnostics contains any warnings or errors from the method call. - Diagnostics tfdiags.Diagnostics -} - -type ProvisionResourceRequest struct { - // Config is the complete provisioner configuration. - Config cty.Value - - // Connection contains any information required to access the resource - // instance. - Connection cty.Value - - // UIOutput is used to return output during the Apply operation. - UIOutput UIOutput -} - -type ProvisionResourceResponse struct { - // Diagnostics contains any warnings or errors from the method call. 
- Diagnostics tfdiags.Diagnostics -} diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go deleted file mode 100644 index c5185432..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/client.go +++ /dev/null @@ -1,446 +0,0 @@ -package registry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-retryablehttp" - svchost "github.com/hashicorp/terraform-svchost" - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/httpclient" - "github.com/hashicorp/terraform/registry/regsrc" - "github.com/hashicorp/terraform/registry/response" - "github.com/hashicorp/terraform/version" -) - -const ( - xTerraformGet = "X-Terraform-Get" - xTerraformVersion = "X-Terraform-Version" - modulesServiceID = "modules.v1" - providersServiceID = "providers.v1" - - // registryDiscoveryRetryEnvName is the name of the environment variable that - // can be configured to customize number of retries for module and provider - // discovery requests with the remote registry. - registryDiscoveryRetryEnvName = "TF_REGISTRY_DISCOVERY_RETRY" - defaultRetry = 1 - - // registryClientTimeoutEnvName is the name of the environment variable that - // can be configured to customize the timeout duration (seconds) for module - // and provider discovery with the remote registry. - registryClientTimeoutEnvName = "TF_REGISTRY_CLIENT_TIMEOUT" - - // defaultRequestTimeout is the default timeout duration for requests to the - // remote registry. - defaultRequestTimeout = 10 * time.Second -) - -var ( - tfVersion = version.String() - - discoveryRetry int - requestTimeout time.Duration -) - -func init() { - configureDiscoveryRetry() - configureRequestTimeout() -} - -// Client provides methods to query Terraform Registries. -type Client struct { - // this is the client to be used for all requests. - client *retryablehttp.Client - - // services is a required *disco.Disco, which may have services and - // credentials pre-loaded. - services *disco.Disco - - // retry is the number of retries the client will attempt for each request - // if it runs into a transient failure with the remote registry. - retry int -} - -// NewClient returns a new initialized registry client. -func NewClient(services *disco.Disco, client *http.Client) *Client { - if services == nil { - services = disco.New() - } - - if client == nil { - client = httpclient.New() - client.Timeout = requestTimeout - } - retryableClient := retryablehttp.NewClient() - retryableClient.HTTPClient = client - retryableClient.RetryMax = discoveryRetry - retryableClient.RequestLogHook = requestLogHook - retryableClient.ErrorHandler = maxRetryErrorHandler - - logOutput, err := logging.LogOutput() - if err != nil { - log.Printf("[WARN] Failed to set up registry client logger, "+ - "continuing without client logging: %s", err) - } - retryableClient.Logger = log.New(logOutput, "", log.Flags()) - - services.Transport = retryableClient.HTTPClient.Transport - - services.SetUserAgent(httpclient.TerraformUserAgent(version.String())) - - return &Client{ - client: retryableClient, - services: services, - } -} - -// Discover queries the host, and returns the url for the registry. 
-func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { - service, err := c.services.DiscoverServiceURL(host, serviceID) - if err != nil { - return nil, &ServiceUnreachableError{err} - } - if !strings.HasSuffix(service.Path, "/") { - service.Path += "/" - } - return service, nil -} - -// ModuleVersions queries the registry for a module, and returns the available versions. -func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) { - host, err := module.SvcHost() - if err != nil { - return nil, err - } - - service, err := c.Discover(host, modulesServiceID) - if err != nil { - return nil, err - } - - p, err := url.Parse(path.Join(module.Module(), "versions")) - if err != nil { - return nil, err - } - - service = service.ResolveReference(p) - - log.Printf("[DEBUG] fetching module versions from %q", service) - - req, err := retryablehttp.NewRequest("GET", service.String(), nil) - if err != nil { - return nil, err - } - - c.addRequestCreds(host, req.Request) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - // OK - case http.StatusNotFound: - return nil, &errModuleNotFound{addr: module} - default: - return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) - } - - var versions response.ModuleVersions - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&versions); err != nil { - return nil, err - } - - for _, mod := range versions.Modules { - for _, v := range mod.Versions { - log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) - } - } - - return &versions, nil -} - -func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { - creds, err := c.services.CredentialsForHost(host) - if err != nil { - log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) - return - } - - if creds != nil { - creds.PrepareRequest(req) - } -} - -// ModuleLocation find the download location for a specific version module. -// This returns a string, because the final location may contain special go-getter syntax. 
-func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) { - host, err := module.SvcHost() - if err != nil { - return "", err - } - - service, err := c.Discover(host, modulesServiceID) - if err != nil { - return "", err - } - - var p *url.URL - if version == "" { - p, err = url.Parse(path.Join(module.Module(), "download")) - } else { - p, err = url.Parse(path.Join(module.Module(), version, "download")) - } - if err != nil { - return "", err - } - download := service.ResolveReference(p) - - log.Printf("[DEBUG] looking up module location from %q", download) - - req, err := retryablehttp.NewRequest("GET", download.String(), nil) - if err != nil { - return "", err - } - - c.addRequestCreds(host, req.Request) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // there should be no body, but save it for logging - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("error reading response body from registry: %s", err) - } - - switch resp.StatusCode { - case http.StatusOK, http.StatusNoContent: - // OK - case http.StatusNotFound: - return "", fmt.Errorf("module %q version %q not found", module, version) - default: - // anything else is an error: - return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) - } - - // the download location is in the X-Terraform-Get header - location := resp.Header.Get(xTerraformGet) - if location == "" { - return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) - } - - // If location looks like it's trying to be a relative URL, treat it as - // one. - // - // We don't do this for just _any_ location, since the X-Terraform-Get - // header is a go-getter location rather than a URL, and so not all - // possible values will parse reasonably as URLs.) - // - // When used in conjunction with go-getter we normally require this header - // to be an absolute URL, but we are more liberal here because third-party - // registry implementations may not "know" their own absolute URLs if - // e.g. they are running behind a reverse proxy frontend, or such. - if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { - locationURL, err := url.Parse(location) - if err != nil { - return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) - } - locationURL = download.ResolveReference(locationURL) - location = locationURL.String() - } - - return location, nil -} - -// TerraformProviderVersions queries the registry for a provider, and returns the available versions. 
-func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) { - host, err := provider.SvcHost() - if err != nil { - return nil, err - } - - service, err := c.Discover(host, providersServiceID) - if err != nil { - return nil, err - } - - p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions")) - if err != nil { - return nil, err - } - - service = service.ResolveReference(p) - - log.Printf("[DEBUG] fetching provider versions from %q", service) - - req, err := retryablehttp.NewRequest("GET", service.String(), nil) - if err != nil { - return nil, err - } - - c.addRequestCreds(host, req.Request) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - // OK - case http.StatusNotFound: - return nil, &errProviderNotFound{addr: provider} - default: - return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status) - } - - var versions response.TerraformProviderVersions - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&versions); err != nil { - return nil, err - } - - return &versions, nil -} - -// TerraformProviderLocation queries the registry for a provider download metadata -func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) { - host, err := provider.SvcHost() - if err != nil { - return nil, err - } - - service, err := c.Discover(host, providersServiceID) - if err != nil { - return nil, err - } - - p, err := url.Parse(path.Join( - provider.TerraformProvider(), - version, - "download", - provider.OS, - provider.Arch, - )) - if err != nil { - return nil, err - } - - service = service.ResolveReference(p) - - log.Printf("[DEBUG] fetching provider location from %q", service) - - req, err := retryablehttp.NewRequest("GET", service.String(), nil) - if err != nil { - return nil, err - } - - c.addRequestCreds(host, req.Request) - req.Header.Set(xTerraformVersion, tfVersion) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var loc response.TerraformProviderPlatformLocation - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&loc); err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK, http.StatusNoContent: - // OK - case http.StatusNotFound: - return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version) - default: - // anything else is an error: - return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status) - } - - return &loc, nil -} - -// configureDiscoveryRetry configures the number of retries the registry client -// will attempt for requests with retryable errors, like 502 status codes -func configureDiscoveryRetry() { - discoveryRetry = defaultRetry - - if v := os.Getenv(registryDiscoveryRetryEnvName); v != "" { - retry, err := strconv.Atoi(v) - if err == nil && retry > 0 { - discoveryRetry = retry - } - } -} - -func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { - if i > 0 { - logger.Printf("[INFO] Previous request to the remote registry failed, attempting retry.") - } -} - -func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { - // Close the body per library instructions - if resp != nil { - resp.Body.Close() - 
} - - // Additional error detail: if we have a response, use the status code; - // if we have an error, use that; otherwise nothing. We will never have - // both response and error. - var errMsg string - if resp != nil { - errMsg = fmt.Sprintf(": %d", resp.StatusCode) - } else if err != nil { - errMsg = fmt.Sprintf(": %s", err) - } - - // This function is always called with numTries=RetryMax+1. If we made any - // retry attempts, include that in the error message. - if numTries > 1 { - return resp, fmt.Errorf("the request failed after %d attempts, please try again later%s", - numTries, errMsg) - } - return resp, fmt.Errorf("the request failed, please try again later%s", errMsg) -} - -// configureRequestTimeout configures the registry client request timeout from -// environment variables -func configureRequestTimeout() { - requestTimeout = defaultRequestTimeout - - if v := os.Getenv(registryClientTimeoutEnvName); v != "" { - timeout, err := strconv.Atoi(v) - if err == nil && timeout > 0 { - requestTimeout = time.Duration(timeout) * time.Second - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go deleted file mode 100644 index 3b99b34d..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/errors.go +++ /dev/null @@ -1,63 +0,0 @@ -package registry - -import ( - "fmt" - - "github.com/hashicorp/terraform-svchost/disco" - "github.com/hashicorp/terraform/registry/regsrc" -) - -type errModuleNotFound struct { - addr *regsrc.Module -} - -func (e *errModuleNotFound) Error() string { - return fmt.Sprintf("module %s not found", e.addr) -} - -// IsModuleNotFound returns true only if the given error is a "module not found" -// error. This allows callers to recognize this particular error condition -// as distinct from operational errors such as poor network connectivity. -func IsModuleNotFound(err error) bool { - _, ok := err.(*errModuleNotFound) - return ok -} - -type errProviderNotFound struct { - addr *regsrc.TerraformProvider -} - -func (e *errProviderNotFound) Error() string { - return fmt.Sprintf("provider %s not found", e.addr) -} - -// IsProviderNotFound returns true only if the given error is a "provider not found" -// error. This allows callers to recognize this particular error condition -// as distinct from operational errors such as poor network connectivity. -func IsProviderNotFound(err error) bool { - _, ok := err.(*errProviderNotFound) - return ok -} - -// IsServiceNotProvided returns true only if the given error is a "service not provided" -// error. This allows callers to recognize this particular error condition -// as distinct from operational errors such as poor network connectivity. 
-func IsServiceNotProvided(err error) bool { - _, ok := err.(*disco.ErrServiceNotProvided) - return ok -} - -// ServiceUnreachableError Registry service is unreachable -type ServiceUnreachableError struct { - err error -} - -func (e *ServiceUnreachableError) Error() string { - return e.err.Error() -} - -// IsServiceUnreachable returns true if the registry/discovery service was unreachable -func IsServiceUnreachable(err error) bool { - _, ok := err.(*ServiceUnreachableError) - return ok -} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go deleted file mode 100644 index c9bc40be..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go +++ /dev/null @@ -1,140 +0,0 @@ -package regsrc - -import ( - "regexp" - "strings" - - "github.com/hashicorp/terraform-svchost" -) - -var ( - // InvalidHostString is a placeholder returned when a raw host can't be - // converted by IDNA spec. It will never be returned for any host for which - // Valid() is true. - InvalidHostString = "" - - // urlLabelEndSubRe is a sub-expression that matches any character that's - // allowed at the start or end of a URL label according to RFC1123. - urlLabelEndSubRe = "[0-9A-Za-z]" - - // urlLabelMidSubRe is a sub-expression that matches any character that's - // allowed in a non-start or end position of a URL label according to RFC1123. - urlLabelMidSubRe = "[0-9A-Za-z-]" - - // urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char - // in an IDN (Unicode) display URL. It's not strict - there are only ~15k - // valid Unicode points in IDN RFC (some with conditions). We are just going - // with being liberal with matching and then erroring if we fail to convert - // to punycode later (which validates chars fully). This at least ensures - // ascii chars disallowed by the RFC1123 parts above don't become legal - // again. - urlLabelUnicodeSubRe = "[^[:ascii:]]" - - // hostLabelSubRe is the sub-expression that matches a valid hostname label. - // It does not anchor the start or end so it can be composed into more - // complex RegExps below. Note that for sanity we don't handle disallowing - // raw punycode in this regexp (esp. since re2 doesn't support negative - // lookbehind, but we can capture its presence here to check later). - hostLabelSubRe = "" + - // Match valid initial char, or unicode char - "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + - // Optionally, match 0 to 61 valid URL or Unicode chars, - // followed by one valid end char or unicode char - "(?:" + - "(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" + - "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + - ")?" - - // hostSubRe is the sub-expression that matches a valid host prefix. - // Allows custom port. - hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?" - - // hostRe is a regexp that matches a valid host prefix. Additional - // validation of unicode strings is needed for matches. - hostRe = regexp.MustCompile("^" + hostSubRe + "$") -) - -// FriendlyHost describes a registry instance identified in source strings by a -// simple bare hostname like registry.terraform.io. -type FriendlyHost struct { - Raw string -} - -func NewFriendlyHost(host string) *FriendlyHost { - return &FriendlyHost{Raw: host} -} - -// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the -// given string. 
If no valid prefix is found, host will be nil and rest will -// contain the full source string. The host prefix must terminate at the end of -// the input or at the first / character. If one or more characters exist after -// the first /, they will be returned as rest (without the / delimiter). -// Hostnames containing punycode WILL be parsed successfully since they may have -// come from an internal normalized source string, however should be considered -// invalid if the string came from a user directly. This must be checked -// explicitly for user-input strings by calling Valid() on the -// returned host. -func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) { - parts := strings.SplitN(source, "/", 2) - - if hostRe.MatchString(parts[0]) { - host = &FriendlyHost{Raw: parts[0]} - if len(parts) == 2 { - rest = parts[1] - } - return - } - - // No match, return whole string as rest along with nil host - rest = source - return -} - -// Valid returns whether the host prefix is considered valid in any case. -// Example of invalid prefixes might include ones that don't conform to the host -// name specifications. Not that IDN prefixes containing punycode are not valid -// input which we expect to always be in user-input or normalised display form. -func (h *FriendlyHost) Valid() bool { - return svchost.IsValid(h.Raw) -} - -// Display returns the host formatted for display to the user in CLI or web -// output. -func (h *FriendlyHost) Display() string { - return svchost.ForDisplay(h.Raw) -} - -// Normalized returns the host formatted for internal reference or comparison. -func (h *FriendlyHost) Normalized() string { - host, err := svchost.ForComparison(h.Raw) - if err != nil { - return InvalidHostString - } - return string(host) -} - -// String returns the host formatted as the user originally typed it assuming it -// was parsed from user input. -func (h *FriendlyHost) String() string { - return h.Raw -} - -// Equal compares the FriendlyHost against another instance taking normalization -// into account. Invalid hosts cannot be compared and will always return false. -func (h *FriendlyHost) Equal(other *FriendlyHost) bool { - if other == nil { - return false - } - - otherHost, err := svchost.ForComparison(other.Raw) - if err != nil { - return false - } - - host, err := svchost.ForComparison(h.Raw) - if err != nil { - return false - } - - return otherHost == host -} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go deleted file mode 100644 index c3edd7d8..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go +++ /dev/null @@ -1,205 +0,0 @@ -package regsrc - -import ( - "errors" - "fmt" - "regexp" - "strings" - - "github.com/hashicorp/terraform-svchost" -) - -var ( - ErrInvalidModuleSource = errors.New("not a valid registry module source") - - // nameSubRe is the sub-expression that matches a valid module namespace or - // name. It's strictly a super-set of what GitHub allows for user/org and - // repo names respectively, but more restrictive than our original repo-name - // regex which allowed periods but could cause ambiguity with hostname - // prefixes. It does not anchor the start or end so it can be composed into - // more complex RegExps below. Alphanumeric with - and _ allowed in non - // leading or trailing positions. Max length 64 chars. (GitHub username is - // 38 max.) - nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" 
- - // providerSubRe is the sub-expression that matches a valid provider. It - // does not anchor the start or end so it can be composed into more complex - // RegExps below. Only lowercase chars and digits are supported in practice. - // Max length 64 chars. - providerSubRe = "[0-9a-z]{1,64}" - - // moduleSourceRe is a regular expression that matches the basic - // namespace/name/provider[//...] format for registry sources. It assumes - // any FriendlyHost prefix has already been removed if present. - moduleSourceRe = regexp.MustCompile( - fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", - nameSubRe, nameSubRe, providerSubRe)) - - // NameRe is a regular expression defining the format allowed for namespace - // or name fields in module registry implementations. - NameRe = regexp.MustCompile("^" + nameSubRe + "$") - - // ProviderRe is a regular expression defining the format allowed for - // provider fields in module registry implementations. - ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") - - // these hostnames are not allowed as registry sources, because they are - // already special case module sources in terraform. - disallowed = map[string]bool{ - "github.com": true, - "bitbucket.org": true, - } -) - -// Module describes a Terraform Registry Module source. -type Module struct { - // RawHost is the friendly host prefix if one was present. It might be nil - // if the original source had no host prefix which implies - // PublicRegistryHost but is distinct from having an actual pointer to - // PublicRegistryHost since it encodes the fact the original string didn't - // include a host prefix at all which is significant for recovering actual - // input not just normalized form. Most callers should access it with Host() - // which will return public registry host instance if it's nil. - RawHost *FriendlyHost - RawNamespace string - RawName string - RawProvider string - RawSubmodule string -} - -// NewModule construct a new module source from separate parts. Pass empty -// string if host or submodule are not needed. -func NewModule(host, namespace, name, provider, submodule string) (*Module, error) { - m := &Module{ - RawNamespace: namespace, - RawName: name, - RawProvider: provider, - RawSubmodule: submodule, - } - if host != "" { - h := NewFriendlyHost(host) - if h != nil { - fmt.Println("HOST:", h) - if !h.Valid() || disallowed[h.Display()] { - return nil, ErrInvalidModuleSource - } - } - m.RawHost = h - } - return m, nil -} - -// ParseModuleSource attempts to parse source as a Terraform registry module -// source. If the string is not found to be in a valid format, -// ErrInvalidModuleSource is returned. Note that this can only be used on -// "input" strings, e.g. either ones supplied by the user or potentially -// normalised but in Display form (unicode). It will fail to parse a source with -// a punycoded domain since this is not permitted input from a user. If you have -// an already normalized string internally, you can compare it without parsing -// by comparing with the normalized version of the subject with the normal -// string equality operator. -func ParseModuleSource(source string) (*Module, error) { - // See if there is a friendly host prefix. 
- host, rest := ParseFriendlyHost(source) - if host != nil { - if !host.Valid() || disallowed[host.Display()] { - return nil, ErrInvalidModuleSource - } - } - - matches := moduleSourceRe.FindStringSubmatch(rest) - if len(matches) < 4 { - return nil, ErrInvalidModuleSource - } - - m := &Module{ - RawHost: host, - RawNamespace: matches[1], - RawName: matches[2], - RawProvider: matches[3], - } - - if len(matches) == 5 { - m.RawSubmodule = matches[4] - } - - return m, nil -} - -// Display returns the source formatted for display to the user in CLI or web -// output. -func (m *Module) Display() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) -} - -// Normalized returns the source formatted for internal reference or comparison. -func (m *Module) Normalized() string { - return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) -} - -// String returns the source formatted as the user originally typed it assuming -// it was parsed from user input. -func (m *Module) String() string { - // Don't normalize public registry hostname - leave it exactly like the user - // input it. - hostPrefix := "" - if m.RawHost != nil { - hostPrefix = m.RawHost.String() + "/" - } - return m.formatWithPrefix(hostPrefix, true) -} - -// Equal compares the module source against another instance taking -// normalization into account. -func (m *Module) Equal(other *Module) bool { - return m.Normalized() == other.Normalized() -} - -// Host returns the FriendlyHost object describing which registry this module is -// in. If the original source string had not host component this will return the -// PublicRegistryHost. -func (m *Module) Host() *FriendlyHost { - if m.RawHost == nil { - return PublicRegistryHost - } - return m.RawHost -} - -func (m *Module) normalizedHostPrefix(host string) string { - if m.Host().Equal(PublicRegistryHost) { - return "" - } - return host + "/" -} - -func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { - suffix := "" - if m.RawSubmodule != "" { - suffix = "//" + m.RawSubmodule - } - str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, - m.RawProvider, suffix) - - // lower case by default - if !preserveCase { - return strings.ToLower(str) - } - return str -} - -// Module returns just the registry ID of the module, without a hostname or -// suffix. -func (m *Module) Module() string { - return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) -} - -// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may -// contain an invalid hostname, this also returns an error indicating if it -// could be converted to a svchost.Hostname. If no host is specified, the -// default PublicRegistryHost is returned. -func (m *Module) SvcHost() (svchost.Hostname, error) { - if m.RawHost == nil { - return svchost.ForComparison(PublicRegistryHost.Raw) - } - return svchost.ForComparison(m.RawHost.Raw) -} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go deleted file mode 100644 index c430bf14..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package regsrc provides helpers for working with source strings that identify -// resources within a Terraform registry. -package regsrc - -var ( - // PublicRegistryHost is a FriendlyHost that represents the public registry. 
- PublicRegistryHost = NewFriendlyHost("registry.terraform.io") -) diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go deleted file mode 100644 index 7205d03b..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go +++ /dev/null @@ -1,60 +0,0 @@ -package regsrc - -import ( - "fmt" - "runtime" - "strings" - - "github.com/hashicorp/terraform-svchost" -) - -var ( - // DefaultProviderNamespace represents the namespace for canonical - // HashiCorp-controlled providers. - DefaultProviderNamespace = "-" -) - -// TerraformProvider describes a Terraform Registry Provider source. -type TerraformProvider struct { - RawHost *FriendlyHost - RawNamespace string - RawName string - OS string - Arch string -} - -// NewTerraformProvider constructs a new provider source. -func NewTerraformProvider(name, os, arch string) *TerraformProvider { - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - - // separate namespace if included - namespace := DefaultProviderNamespace - if names := strings.SplitN(name, "/", 2); len(names) == 2 { - namespace, name = names[0], names[1] - } - p := &TerraformProvider{ - RawHost: PublicRegistryHost, - RawNamespace: namespace, - RawName: name, - OS: os, - Arch: arch, - } - - return p -} - -// Provider returns just the registry ID of the provider -func (p *TerraformProvider) TerraformProvider() string { - return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName) -} - -// SvcHost returns the svchost.Hostname for this provider. The -// default PublicRegistryHost is returned. -func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) { - return svchost.ForComparison(PublicRegistryHost.Raw) -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module.go b/vendor/github.com/hashicorp/terraform/registry/response/module.go deleted file mode 100644 index 3bd2b3df..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/module.go +++ /dev/null @@ -1,93 +0,0 @@ -package response - -import ( - "time" -) - -// Module is the response structure with the data for a single module version. -type Module struct { - ID string `json:"id"` - - //--------------------------------------------------------------- - // Metadata about the overall module. - - Owner string `json:"owner"` - Namespace string `json:"namespace"` - Name string `json:"name"` - Version string `json:"version"` - Provider string `json:"provider"` - Description string `json:"description"` - Source string `json:"source"` - PublishedAt time.Time `json:"published_at"` - Downloads int `json:"downloads"` - Verified bool `json:"verified"` -} - -// ModuleDetail represents a module in full detail. -type ModuleDetail struct { - Module - - //--------------------------------------------------------------- - // Metadata about the overall module. This is only available when - // requesting the specific module (not in list responses). - - // Root is the root module. - Root *ModuleSubmodule `json:"root"` - - // Submodules are the other submodules that are available within - // this module. - Submodules []*ModuleSubmodule `json:"submodules"` - - //--------------------------------------------------------------- - // The fields below are only set when requesting this specific - // module. They are available to easily know all available versions - // and providers without multiple API calls. 
- - Providers []string `json:"providers"` // All available providers - Versions []string `json:"versions"` // All versions -} - -// ModuleSubmodule is the metadata about a specific submodule within -// a module. This includes the root module as a special case. -type ModuleSubmodule struct { - Path string `json:"path"` - Readme string `json:"readme"` - Empty bool `json:"empty"` - - Inputs []*ModuleInput `json:"inputs"` - Outputs []*ModuleOutput `json:"outputs"` - Dependencies []*ModuleDep `json:"dependencies"` - Resources []*ModuleResource `json:"resources"` -} - -// ModuleInput is an input for a module. -type ModuleInput struct { - Name string `json:"name"` - Description string `json:"description"` - Default string `json:"default"` -} - -// ModuleOutput is an output for a module. -type ModuleOutput struct { - Name string `json:"name"` - Description string `json:"description"` -} - -// ModuleDep is an output for a module. -type ModuleDep struct { - Name string `json:"name"` - Source string `json:"source"` - Version string `json:"version"` -} - -// ModuleProviderDep is the output for a provider dependency -type ModuleProviderDep struct { - Name string `json:"name"` - Version string `json:"version"` -} - -// ModuleResource is an output for a module. -type ModuleResource struct { - Name string `json:"name"` - Type string `json:"type"` -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_list.go b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go deleted file mode 100644 index 97837482..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/module_list.go +++ /dev/null @@ -1,7 +0,0 @@ -package response - -// ModuleList is the response structure for a pageable list of modules. -type ModuleList struct { - Meta PaginationMeta `json:"meta"` - Modules []*Module `json:"modules"` -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go deleted file mode 100644 index e48499dc..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go +++ /dev/null @@ -1,14 +0,0 @@ -package response - -// ModuleProvider represents a single provider for modules. -type ModuleProvider struct { - Name string `json:"name"` - Downloads int `json:"downloads"` - ModuleCount int `json:"module_count"` -} - -// ModuleProviderList is the response structure for a pageable list of ModuleProviders. -type ModuleProviderList struct { - Meta PaginationMeta `json:"meta"` - Providers []*ModuleProvider `json:"providers"` -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go deleted file mode 100644 index f69e9750..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go +++ /dev/null @@ -1,32 +0,0 @@ -package response - -// ModuleVersions is the response format that contains all metadata about module -// versions needed for terraform CLI to resolve version constraints. See RFC -// TF-042 for details on this format. -type ModuleVersions struct { - Modules []*ModuleProviderVersions `json:"modules"` -} - -// ModuleProviderVersions is the response format for a single module instance, -// containing metadata about all versions and their dependencies. 
-type ModuleProviderVersions struct { - Source string `json:"source"` - Versions []*ModuleVersion `json:"versions"` -} - -// ModuleVersion is the output metadata for a given version needed by CLI to -// resolve candidate versions to satisfy requirements. -type ModuleVersion struct { - Version string `json:"version"` - Root VersionSubmodule `json:"root"` - Submodules []*VersionSubmodule `json:"submodules"` -} - -// VersionSubmodule is the output metadata for a submodule within a given -// version needed by CLI to resolve candidate versions to satisfy requirements. -// When representing the Root in JSON the path is omitted. -type VersionSubmodule struct { - Path string `json:"path,omitempty"` - Providers []*ModuleProviderDep `json:"providers"` - Dependencies []*ModuleDep `json:"dependencies"` -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go deleted file mode 100644 index 75a92549..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/pagination.go +++ /dev/null @@ -1,65 +0,0 @@ -package response - -import ( - "net/url" - "strconv" -) - -// PaginationMeta is a structure included in responses for pagination. -type PaginationMeta struct { - Limit int `json:"limit"` - CurrentOffset int `json:"current_offset"` - NextOffset *int `json:"next_offset,omitempty"` - PrevOffset *int `json:"prev_offset,omitempty"` - NextURL string `json:"next_url,omitempty"` - PrevURL string `json:"prev_url,omitempty"` -} - -// NewPaginationMeta populates pagination meta data from result parameters -func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { - pm := PaginationMeta{ - Limit: limit, - CurrentOffset: offset, - } - - // Calculate next/prev offsets, leave nil if not valid pages - nextOffset := offset + limit - if hasMore { - pm.NextOffset = &nextOffset - } - - prevOffset := offset - limit - if prevOffset < 0 { - prevOffset = 0 - } - if prevOffset < offset { - pm.PrevOffset = &prevOffset - } - - // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should - // catch missing URLs if we call with bad URL arg (and we care about them being present). - if currentURL != "" && pm.NextOffset != nil { - pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) - } - if currentURL != "" && pm.PrevOffset != nil { - pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) - } - - return pm -} - -func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { - u, err := url.Parse(baseURL) - if err != nil { - return "", err - } - q := u.Query() - if val == defaultVal { - // elide param if it's the default value - q.Del(key) - } else { - q.Set(key, strconv.Itoa(val)) - } - u.RawQuery = q.Encode() - return u.String(), nil -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider.go b/vendor/github.com/hashicorp/terraform/registry/response/provider.go deleted file mode 100644 index 5e8bae35..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/provider.go +++ /dev/null @@ -1,36 +0,0 @@ -package response - -import ( - "time" -) - -// Provider is the response structure with the data for a single provider -// version. This is just the metadata. A full provider response will be -// ProviderDetail. -type Provider struct { - ID string `json:"id"` - - //--------------------------------------------------------------- - // Metadata about the overall provider. 
- - Owner string `json:"owner"` - Namespace string `json:"namespace"` - Name string `json:"name"` - Version string `json:"version"` - Description string `json:"description"` - Source string `json:"source"` - PublishedAt time.Time `json:"published_at"` - Downloads int `json:"downloads"` -} - -// ProviderDetail represents a Provider with full detail. -type ProviderDetail struct { - Provider - - //--------------------------------------------------------------- - // The fields below are only set when requesting this specific - // module. They are available to easily know all available versions - // without multiple API calls. - - Versions []string `json:"versions"` // All versions -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go deleted file mode 100644 index 1dc7d237..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go +++ /dev/null @@ -1,7 +0,0 @@ -package response - -// ProviderList is the response structure for a pageable list of providers. -type ProviderList struct { - Meta PaginationMeta `json:"meta"` - Providers []*Provider `json:"providers"` -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/redirect.go b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go deleted file mode 100644 index d5eb49ba..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/redirect.go +++ /dev/null @@ -1,6 +0,0 @@ -package response - -// Redirect causes the frontend to perform a window redirect. -type Redirect struct { - URL string `json:"url"` -} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go deleted file mode 100644 index c2c333b0..00000000 --- a/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go +++ /dev/null @@ -1,95 +0,0 @@ -package response - -import ( - "sort" - "strings" - - version "github.com/hashicorp/go-version" -) - -// TerraformProvider is the response structure for all required information for -// Terraform to choose a download URL. It must include all versions and all -// platforms for Terraform to perform version and os/arch constraint matching -// locally. -type TerraformProvider struct { - ID string `json:"id"` - - Versions []*TerraformProviderVersion `json:"versions"` -} - -// TerraformProviderVersion is the Terraform-specific response structure for a -// provider version. -type TerraformProviderVersion struct { - Version string `json:"version"` - Protocols []string `json:"protocols"` - - Platforms []*TerraformProviderPlatform `json:"platforms"` -} - -// TerraformProviderVersions is the Terraform-specific response structure for an -// array of provider versions -type TerraformProviderVersions struct { - ID string `json:"id"` - Versions []*TerraformProviderVersion `json:"versions"` - Warnings []string `json:"warnings"` -} - -// TerraformProviderPlatform is the Terraform-specific response structure for a -// provider platform. -type TerraformProviderPlatform struct { - OS string `json:"os"` - Arch string `json:"arch"` -} - -// TerraformProviderPlatformLocation is the Terraform-specific response -// structure for a provider platform with all details required to perform a -// download. 
-type TerraformProviderPlatformLocation struct { - Protocols []string `json:"protocols"` - OS string `json:"os"` - Arch string `json:"arch"` - Filename string `json:"filename"` - DownloadURL string `json:"download_url"` - ShasumsURL string `json:"shasums_url"` - ShasumsSignatureURL string `json:"shasums_signature_url"` - Shasum string `json:"shasum"` - - SigningKeys SigningKeyList `json:"signing_keys"` -} - -// SigningKeyList is the response structure for a list of signing keys. -type SigningKeyList struct { - GPGKeys []*GPGKey `json:"gpg_public_keys"` -} - -// GPGKey is the response structure for a GPG key. -type GPGKey struct { - ASCIIArmor string `json:"ascii_armor"` - Source string `json:"source"` - SourceURL *string `json:"source_url"` -} - -// Collection type for TerraformProviderVersion -type ProviderVersionCollection []*TerraformProviderVersion - -// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg -// keys in the response. -func (signingKeys *SigningKeyList) GPGASCIIArmor() string { - keys := []string{} - - for _, gpgKey := range signingKeys.GPGKeys { - keys = append(keys, gpgKey.ASCIIArmor) - } - - return strings.Join(keys, "\n") -} - -// Sort sorts versions from newest to oldest. -func (v ProviderVersionCollection) Sort() { - sort.Slice(v, func(i, j int) bool { - versionA, _ := version.NewVersion(v[i].Version) - versionB, _ := version.NewVersion(v[j].Version) - - return versionA.GreaterThan(versionB) - }) -} diff --git a/vendor/github.com/hashicorp/terraform/states/doc.go b/vendor/github.com/hashicorp/terraform/states/doc.go deleted file mode 100644 index 7dd74ac7..00000000 --- a/vendor/github.com/hashicorp/terraform/states/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package states contains the types that are used to represent Terraform -// states. -package states diff --git a/vendor/github.com/hashicorp/terraform/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go deleted file mode 100644 index 0dc73499..00000000 --- a/vendor/github.com/hashicorp/terraform/states/eachmode_string.go +++ /dev/null @@ -1,35 +0,0 @@ -// Code generated by "stringer -type EachMode"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[NoEach-0] - _ = x[EachList-76] - _ = x[EachMap-77] -} - -const ( - _EachMode_name_0 = "NoEach" - _EachMode_name_1 = "EachListEachMap" -) - -var ( - _EachMode_index_1 = [...]uint8{0, 8, 15} -) - -func (i EachMode) String() string { - switch { - case i == 0: - return _EachMode_name_0 - case 76 <= i && i <= 77: - i -= 76 - return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] - default: - return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_generation.go b/vendor/github.com/hashicorp/terraform/states/instance_generation.go deleted file mode 100644 index 617ad4ea..00000000 --- a/vendor/github.com/hashicorp/terraform/states/instance_generation.go +++ /dev/null @@ -1,24 +0,0 @@ -package states - -// Generation is used to represent multiple objects in a succession of objects -// represented by a single resource instance address. 
A resource instance can -// have multiple generations over its lifetime due to object replacement -// (when a change can't be applied without destroying and re-creating), and -// multiple generations can exist at the same time when create_before_destroy -// is used. -// -// A Generation value can either be the value of the variable "CurrentGen" or -// a value of type DeposedKey. Generation values can be compared for equality -// using "==" and used as map keys. The zero value of Generation (nil) is not -// a valid generation and must not be used. -type Generation interface { - generation() -} - -// CurrentGen is the Generation representing the currently-active object for -// a resource instance. -var CurrentGen Generation - -type currentGen struct{} - -func (g currentGen) generation() {} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object.go b/vendor/github.com/hashicorp/terraform/states/instance_object.go deleted file mode 100644 index 78e1dda9..00000000 --- a/vendor/github.com/hashicorp/terraform/states/instance_object.go +++ /dev/null @@ -1,125 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" -) - -// ResourceInstanceObject is the local representation of a specific remote -// object associated with a resource instance. In practice not all remote -// objects are actually remote in the sense of being accessed over the network, -// but this is the most common case. -// -// It is not valid to mutate a ResourceInstanceObject once it has been created. -// Instead, create a new object and replace the existing one. -type ResourceInstanceObject struct { - // Value is the object-typed value representing the remote object within - // Terraform. - Value cty.Value - - // Private is an opaque value set by the provider when this object was - // last created or updated. Terraform Core does not use this value in - // any way and it is not exposed anywhere in the user interface, so - // a provider can use it for retaining any necessary private state. - Private []byte - - // Status represents the "readiness" of the object as of the last time - // it was updated. - Status ObjectStatus - - // Dependencies is a set of absolute address to other resources this - // instance dependeded on when it was applied. This is used to construct - // the dependency relationships for an object whose configuration is no - // longer available, such as if it has been removed from configuration - // altogether, or is now deposed. - Dependencies []addrs.AbsResource - - // DependsOn corresponds to the deprecated `depends_on` field in the state. - // This field contained the configuration `depends_on` values, and some of - // the references from within a single module. - DependsOn []addrs.Referenceable -} - -// ObjectStatus represents the status of a RemoteObject. -type ObjectStatus rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus - -const ( - // ObjectReady is an object status for an object that is ready to use. - ObjectReady ObjectStatus = 'R' - - // ObjectTainted is an object status representing an object that is in - // an unrecoverable bad state due to a partial failure during a create, - // update, or delete operation. Since it cannot be moved into the - // ObjectRead state, a tainted object must be replaced. 
- ObjectTainted ObjectStatus = 'T' - - // ObjectPlanned is a special object status used only for the transient - // placeholder objects we place into state during the refresh and plan - // walks to stand in for objects that will be created during apply. - // - // Any object of this status must have a corresponding change recorded - // in the current plan, whose value must then be used in preference to - // the value stored in state when evaluating expressions. A planned - // object stored in state will be incomplete if any of its attributes are - // not yet known, and the plan must be consulted in order to "see" those - // unknown values, because the state is not able to represent them. - ObjectPlanned ObjectStatus = 'P' -) - -// Encode marshals the value within the receiver to produce a -// ResourceInstanceObjectSrc ready to be written to a state file. -// -// The given type must be the implied type of the resource type schema, and -// the given value must conform to it. It is important to pass the schema -// type and not the object's own type so that dynamically-typed attributes -// will be stored correctly. The caller must also provide the version number -// of the schema that the given type was derived from, which will be recorded -// in the source object so it can be used to detect when schema migration is -// required on read. -// -// The returned object may share internal references with the receiver and -// so the caller must not mutate the receiver any further once once this -// method is called. -func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) { - // Our state serialization can't represent unknown values, so we convert - // them to nulls here. This is lossy, but nobody should be writing unknown - // values here and expecting to get them out again later. - // - // We get unknown values here while we're building out a "planned state" - // during the plan phase, but the value stored in the plan takes precedence - // for expression evaluation. The apply step should never produce unknown - // values, but if it does it's the responsibility of the caller to detect - // and raise an error about that. - val := cty.UnknownAsNull(o.Value) - - src, err := ctyjson.Marshal(val, ty) - if err != nil { - return nil, err - } - - return &ResourceInstanceObjectSrc{ - SchemaVersion: schemaVersion, - AttrsJSON: src, - Private: o.Private, - Status: o.Status, - Dependencies: o.Dependencies, - }, nil -} - -// AsTainted returns a deep copy of the receiver with the status updated to -// ObjectTainted. -func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject { - if o == nil { - // A nil object can't be tainted, but we'll allow this anyway to - // avoid a crash, since we presumably intend to eventually record - // the object has having been deleted anyway. 
- return nil - } - ret := o.DeepCopy() - ret.Status = ObjectTainted - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go deleted file mode 100644 index a18cf313..00000000 --- a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go +++ /dev/null @@ -1,116 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// ResourceInstanceObjectSrc is a not-fully-decoded version of -// ResourceInstanceObject. Decoding of it can be completed by first handling -// any schema migration steps to get to the latest schema version and then -// calling method Decode with the implied type of the latest schema. -type ResourceInstanceObjectSrc struct { - // SchemaVersion is the resource-type-specific schema version number that - // was current when either AttrsJSON or AttrsFlat was encoded. Migration - // steps are required if this is less than the current version number - // reported by the corresponding provider. - SchemaVersion uint64 - - // AttrsJSON is a JSON-encoded representation of the object attributes, - // encoding the value (of the object type implied by the associated resource - // type schema) that represents this remote object in Terraform Language - // expressions, and is compared with configuration when producing a diff. - // - // This is retained in JSON format here because it may require preprocessing - // before decoding if, for example, the stored attributes are for an older - // schema version which the provider must upgrade before use. If the - // version is current, it is valid to simply decode this using the - // type implied by the current schema, without the need for the provider - // to perform an upgrade first. - // - // When writing a ResourceInstanceObject into the state, AttrsJSON should - // always be conformant to the current schema version and the current - // schema version should be recorded in the SchemaVersion field. - AttrsJSON []byte - - // AttrsFlat is a legacy form of attributes used in older state file - // formats, and in the new state format for objects that haven't yet been - // upgraded. This attribute is mutually exclusive with Attrs: for any - // ResourceInstanceObject, only one of these attributes may be populated - // and the other must be nil. - // - // An instance object with this field populated should be upgraded to use - // Attrs at the earliest opportunity, since this legacy flatmap-based - // format will be phased out over time. AttrsFlat should not be used when - // writing new or updated objects to state; instead, callers must follow - // the recommendations in the AttrsJSON documentation above. - AttrsFlat map[string]string - - // These fields all correspond to the fields of the same name on - // ResourceInstanceObject. - Private []byte - Status ObjectStatus - Dependencies []addrs.AbsResource - // deprecated - DependsOn []addrs.Referenceable -} - -// Decode unmarshals the raw representation of the object attributes. Pass the -// implied type of the corresponding resource type schema for correct operation. -// -// Before calling Decode, the caller must check that the SchemaVersion field -// exactly equals the version number of the schema whose implied type is being -// passed, or else the result is undefined. 
-// -// The returned object may share internal references with the receiver and -// so the caller must not mutate the receiver any further once once this -// method is called. -func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) { - var val cty.Value - var err error - if os.AttrsFlat != nil { - // Legacy mode. We'll do our best to unpick this from the flatmap. - val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty) - if err != nil { - return nil, err - } - } else { - val, err = ctyjson.Unmarshal(os.AttrsJSON, ty) - if err != nil { - return nil, err - } - } - - return &ResourceInstanceObject{ - Value: val, - Status: os.Status, - Dependencies: os.Dependencies, - DependsOn: os.DependsOn, - Private: os.Private, - }, nil -} - -// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the -// metadata from the receiver and writing in the given new schema version -// and attribute value that are presumed to have resulted from upgrading -// from an older schema version. -func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { - new := os.DeepCopy() - new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap - - // This is the same principle as ResourceInstanceObject.Encode, but - // avoiding a decode/re-encode cycle because we don't have type info - // available for the "old" attributes. - newAttrs = cty.UnknownAsNull(newAttrs) - src, err := ctyjson.Marshal(newAttrs, newType) - if err != nil { - return nil, err - } - - new.AttrsJSON = src - new.SchemaVersion = newSchemaVersion - return new, nil -} diff --git a/vendor/github.com/hashicorp/terraform/states/module.go b/vendor/github.com/hashicorp/terraform/states/module.go deleted file mode 100644 index ec177edf..00000000 --- a/vendor/github.com/hashicorp/terraform/states/module.go +++ /dev/null @@ -1,320 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" -) - -// Module is a container for the states of objects within a particular module. -type Module struct { - Addr addrs.ModuleInstance - - // Resources contains the state for each resource. The keys in this map are - // an implementation detail and must not be used by outside callers. - Resources map[string]*Resource - - // OutputValues contains the state for each output value. The keys in this - // map are output value names. - OutputValues map[string]*OutputValue - - // LocalValues contains the value for each named output value. The keys - // in this map are local value names. - LocalValues map[string]cty.Value -} - -// NewModule constructs an empty module state for the given module address. -func NewModule(addr addrs.ModuleInstance) *Module { - return &Module{ - Addr: addr, - Resources: map[string]*Resource{}, - OutputValues: map[string]*OutputValue{}, - LocalValues: map[string]cty.Value{}, - } -} - -// Resource returns the state for the resource with the given address within -// the receiving module state, or nil if the requested resource is not tracked -// in the state. -func (ms *Module) Resource(addr addrs.Resource) *Resource { - return ms.Resources[addr.String()] -} - -// ResourceInstance returns the state for the resource instance with the given -// address within the receiving module state, or nil if the requested instance -// is not tracked in the state. 
-func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { - rs := ms.Resource(addr.Resource) - if rs == nil { - return nil - } - return rs.Instance(addr.Key) -} - -// SetResourceMeta updates the resource-level metadata for the resource -// with the given address, creating the resource state for it if it doesn't -// already exist. -func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) { - rs := ms.Resource(addr) - if rs == nil { - rs = &Resource{ - Addr: addr, - Instances: map[addrs.InstanceKey]*ResourceInstance{}, - } - ms.Resources[addr.String()] = rs - } - - rs.EachMode = eachMode - rs.ProviderConfig = provider -} - -// RemoveResource removes the entire state for the given resource, taking with -// it any instances associated with the resource. This should generally be -// called only for resource objects whose instances have all been destroyed. -func (ms *Module) RemoveResource(addr addrs.Resource) { - delete(ms.Resources, addr.String()) -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simultaneously -// updating the recorded provider configuration address, dependencies, and -// resource EachMode. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance will be removed altogether. -// -// The provider address and "each mode" are resource-wide settings and so they -// are updated for all other instances of the same resource as a side-effect of -// this call. -func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - rs := ms.Resource(addr.Resource) - // if the resource is nil and the object is nil, don't do anything! - // you'll probably just cause issues - if obj == nil && rs == nil { - return - } - if obj == nil && rs != nil { - // does the resource have any other objects? - // if not then delete the whole resource - // When deleting the resource, ensure that its EachMode is NoEach, - // as a resource with EachList or EachMap can have 0 instances and be valid - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - delete(ms.Resources, addr.Resource.String()) - return - } - // check for an existing resource, now that we've ensured that rs.Instances is more than 0/not nil - is := rs.Instance(addr.Key) - if is == nil { - // if there is no instance on the resource with this address and obj is nil, return and change nothing - return - } - // if we have an instance, update the current - is.Current = obj - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - // Delete the resource if it has no instances, but only if NoEach - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - delete(ms.Resources, addr.Resource.String()) - return - } - } - // Nothing more to do here, so return! - return - } - if rs == nil && obj != nil { - // We don't have have a resource so make one, which is a side effect of setResourceMeta - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - // now we have a resource! 
so update the rs value to point to it - rs = ms.Resource(addr.Resource) - } - // Get our instance from the resource; it could be there or not at this point - is := rs.Instance(addr.Key) - if is == nil { - // if we don't have a resource, create one and add to the instances - is = rs.CreateInstance(addr.Key) - // update the resource meta because we have a new instance, so EachMode may have changed - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - } - // Update the resource's ProviderConfig, in case the provider has updated - rs.ProviderConfig = provider - is.Current = obj -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. For example, this method is useful if reloading objects -// that were persisted to a state file. To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) - - rs := ms.Resource(addr.Resource) - is := rs.EnsureInstance(addr.Key) - if obj != nil { - is.Deposed[key] = obj - } else { - delete(is.Deposed, key) - } - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. - delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// ForgetResourceInstanceAll removes the record of all objects associated with -// the specified resource instance, if present. If not present, this is a no-op. -func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) { - rs := ms.Resource(addr.Resource) - if rs == nil { - return - } - delete(rs.Instances, addr.Key) - - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// ForgetResourceInstanceDeposed removes the record of the deposed object with -// the given address and key, if present. If not present, this is a no-op. -func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { - rs := ms.Resource(addr.Resource) - if rs == nil { - return - } - is := rs.Instance(addr.Key) - if is == nil { - return - } - delete(is.Deposed, key) - - if !is.HasObjects() { - // If we have no objects at all then we'll clean up. 
- delete(rs.Instances, addr.Key) - } - if rs.EachMode == NoEach && len(rs.Instances) == 0 { - // Also clean up if we only expect to have one instance anyway - // and there are none. We leave the resource behind if an each mode - // is active because an empty list or map of instances is a valid state. - delete(ms.Resources, addr.Resource.String()) - } -} - -// deposeResourceInstanceObject is the real implementation of -// SyncState.DeposeResourceInstanceObject. -func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { - is := ms.ResourceInstance(addr) - if is == nil { - return NotDeposed - } - return is.deposeCurrentObject(forceKey) -} - -// maybeRestoreResourceInstanceDeposed is the real implementation of -// SyncState.MaybeRestoreResourceInstanceDeposed. -func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { - rs := ms.Resource(addr.Resource) - if rs == nil { - return false - } - is := rs.Instance(addr.Key) - if is == nil { - return false - } - if is.Current != nil { - return false - } - if len(is.Deposed) == 0 { - return false - } - is.Current = is.Deposed[key] - delete(is.Deposed, key) - return true -} - -// SetOutputValue writes an output value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { - os := &OutputValue{ - Value: value, - Sensitive: sensitive, - } - ms.OutputValues[name] = os - return os -} - -// RemoveOutputValue removes the output value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveOutputValue(name string) { - delete(ms.OutputValues, name) -} - -// SetLocalValue writes a local value into the state, overwriting any -// existing value of the same name. -func (ms *Module) SetLocalValue(name string, value cty.Value) { - ms.LocalValues[name] = value -} - -// RemoveLocalValue removes the local value of the given name from the state, -// if it exists. This method is a no-op if there is no value of the given -// name. -func (ms *Module) RemoveLocalValue(name string) { - delete(ms.LocalValues, name) -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// You probably shouldn't call this! See the method of the same name on -// type State for more information on what this is for and the rare situations -// where it is safe to use. -func (ms *Module) PruneResourceHusks() { - for _, rs := range ms.Resources { - if len(rs.Instances) == 0 { - ms.RemoveResource(rs.Addr) - } - } -} - -// empty returns true if the receving module state is contributing nothing -// to the state. In other words, it returns true if the module could be -// removed from the state altogether without changing the meaning of the state. -// -// In practice a module containing no objects is the same as a non-existent -// module, and so we can opportunistically clean up once a module becomes -// empty on the assumption that it will be re-added if needed later. -func (ms *Module) empty() bool { - if ms == nil { - return true - } - - // This must be updated to cover any new collections added to Module - // in future. 
- return (len(ms.Resources) == 0 && - len(ms.OutputValues) == 0 && - len(ms.LocalValues) == 0) -} diff --git a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go deleted file mode 100644 index 96a6db2f..00000000 --- a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. - -package states - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ObjectReady-82] - _ = x[ObjectTainted-84] - _ = x[ObjectPlanned-80] -} - -const ( - _ObjectStatus_name_0 = "ObjectPlanned" - _ObjectStatus_name_1 = "ObjectReady" - _ObjectStatus_name_2 = "ObjectTainted" -) - -func (i ObjectStatus) String() string { - switch { - case i == 80: - return _ObjectStatus_name_0 - case i == 82: - return _ObjectStatus_name_1 - case i == 84: - return _ObjectStatus_name_2 - default: - return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/output_value.go b/vendor/github.com/hashicorp/terraform/states/output_value.go deleted file mode 100644 index d232b76d..00000000 --- a/vendor/github.com/hashicorp/terraform/states/output_value.go +++ /dev/null @@ -1,14 +0,0 @@ -package states - -import ( - "github.com/zclconf/go-cty/cty" -) - -// OutputValue represents the state of a particular output value. -// -// It is not valid to mutate an OutputValue object once it has been created. -// Instead, create an entirely new OutputValue to replace the previous one. -type OutputValue struct { - Value cty.Value - Sensitive bool -} diff --git a/vendor/github.com/hashicorp/terraform/states/resource.go b/vendor/github.com/hashicorp/terraform/states/resource.go deleted file mode 100644 index da883dda..00000000 --- a/vendor/github.com/hashicorp/terraform/states/resource.go +++ /dev/null @@ -1,246 +0,0 @@ -package states - -import ( - "fmt" - "math/rand" - "time" - - "github.com/hashicorp/terraform/addrs" -) - -// Resource represents the state of a resource. -type Resource struct { - // Addr is the module-relative address for the resource this state object - // belongs to. - Addr addrs.Resource - - // EachMode is the multi-instance mode currently in use for this resource, - // or NoEach if this is a single-instance resource. This dictates what - // type of value is returned when accessing this resource via expressions - // in the Terraform language. - EachMode EachMode - - // Instances contains the potentially-multiple instances associated with - // this resource. This map can contain a mixture of different key types, - // but only the ones of InstanceKeyType are considered current. - Instances map[addrs.InstanceKey]*ResourceInstance - - // ProviderConfig is the absolute address for the provider configuration that - // most recently managed this resource. This is used to connect a resource - // with a provider configuration when the resource configuration block is - // not available, such as if it has been removed from configuration - // altogether. - ProviderConfig addrs.AbsProviderConfig -} - -// Instance returns the state for the instance with the given key, or nil -// if no such instance is tracked within the state. 
-func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { - return rs.Instances[key] -} - -// CreateInstance creates an instance and adds it to the resource -func (rs *Resource) CreateInstance(key addrs.InstanceKey) *ResourceInstance { - is := NewResourceInstance() - rs.Instances[key] = is - return is -} - -// EnsureInstance returns the state for the instance with the given key, -// creating a new empty state for it if one doesn't already exist. -// -// Because this may create and save a new state, it is considered to be -// a write operation. -func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { - ret := rs.Instance(key) - if ret == nil { - ret = NewResourceInstance() - rs.Instances[key] = ret - } - return ret -} - -// ResourceInstance represents the state of a particular instance of a resource. -type ResourceInstance struct { - // Current, if non-nil, is the remote object that is currently represented - // by the corresponding resource instance. - Current *ResourceInstanceObjectSrc - - // Deposed, if len > 0, contains any remote objects that were previously - // represented by the corresponding resource instance but have been - // replaced and are pending destruction due to the create_before_destroy - // lifecycle mode. - Deposed map[DeposedKey]*ResourceInstanceObjectSrc -} - -// NewResourceInstance constructs and returns a new ResourceInstance, ready to -// use. -func NewResourceInstance() *ResourceInstance { - return &ResourceInstance{ - Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, - } -} - -// HasCurrent returns true if this resource instance has a "current"-generation -// object. Most instances do, but this can briefly be false during a -// create-before-destroy replace operation when the current has been deposed -// but its replacement has not yet been created. -func (i *ResourceInstance) HasCurrent() bool { - return i != nil && i.Current != nil -} - -// HasDeposed returns true if this resource instance has a deposed object -// with the given key. -func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { - return i != nil && i.Deposed[key] != nil -} - -// HasAnyDeposed returns true if this resource instance has one or more -// deposed objects. -func (i *ResourceInstance) HasAnyDeposed() bool { - return i != nil && len(i.Deposed) > 0 -} - -// HasObjects returns true if this resource has any objects at all, whether -// current or deposed. -func (i *ResourceInstance) HasObjects() bool { - return i.Current != nil || len(i.Deposed) != 0 -} - -// deposeCurrentObject is part of the real implementation of -// SyncState.DeposeResourceInstanceObject. The exported method uses a lock -// to ensure that we can safely allocate an unused deposed key without -// collision. -func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { - if !i.HasCurrent() { - return NotDeposed - } - - key := forceKey - if key == NotDeposed { - key = i.findUnusedDeposedKey() - } else { - if _, exists := i.Deposed[key]; exists { - panic(fmt.Sprintf("forced key %s is already in use", forceKey)) - } - } - i.Deposed[key] = i.Current - i.Current = nil - return key -} - -// GetGeneration retrieves the object of the given generation from the -// ResourceInstance, or returns nil if there is no such object. -// -// If the given generation is nil or invalid, this method will panic. 
-func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { - if gen == CurrentGen { - return i.Current - } - if dk, ok := gen.(DeposedKey); ok { - return i.Deposed[dk] - } - if gen == nil { - panic(fmt.Sprintf("get with nil Generation")) - } - // Should never fall out here, since the above covers all possible - // Generation values. - panic(fmt.Sprintf("get invalid Generation %#v", gen)) -} - -// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance at the time of the call. -// -// Note that the validity of this result may change if new deposed keys are -// allocated before it is used. To avoid this risk, instead use the -// DeposeResourceInstanceObject method on the SyncState wrapper type, which -// allocates a key and uses it atomically. -func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { - return i.findUnusedDeposedKey() -} - -// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to -// already be in use for this instance. -func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { - for { - key := NewDeposedKey() - if _, exists := i.Deposed[key]; !exists { - return key - } - // Spin until we find a unique one. This shouldn't take long, because - // we have a 32-bit keyspace and there's rarely more than one deposed - // instance. - } -} - -// EachMode specifies the multi-instance mode for a resource. -type EachMode rune - -const ( - NoEach EachMode = 0 - EachList EachMode = 'L' - EachMap EachMode = 'M' -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode - -func eachModeForInstanceKey(key addrs.InstanceKey) EachMode { - switch key.(type) { - case addrs.IntKey: - return EachList - case addrs.StringKey: - return EachMap - default: - if key == addrs.NoKey { - return NoEach - } - panic(fmt.Sprintf("don't know an each mode for instance key %#v", key)) - } -} - -// DeposedKey is a 8-character hex string used to uniquely identify deposed -// instance objects in the state. -type DeposedKey string - -// NotDeposed is a special invalid value of DeposedKey that is used to represent -// the absense of a deposed key. It must not be used as an actual deposed key. -const NotDeposed = DeposedKey("") - -var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) - -// NewDeposedKey generates a pseudo-random deposed key. Because of the short -// length of these keys, uniqueness is not a natural consequence and so the -// caller should test to see if the generated key is already in use and generate -// another if so, until a unique key is found. -func NewDeposedKey() DeposedKey { - v := deposedKeyRand.Uint32() - return DeposedKey(fmt.Sprintf("%08x", v)) -} - -func (k DeposedKey) String() string { - return string(k) -} - -func (k DeposedKey) GoString() string { - ks := string(k) - switch { - case ks == "": - return "states.NotDeposed" - default: - return fmt.Sprintf("states.DeposedKey(%s)", ks) - } -} - -// Generation is a helper method to convert a DeposedKey into a Generation. -// If the reciever is anything other than NotDeposed then the result is -// just the same value as a Generation. If the receiver is NotDeposed then -// the result is CurrentGen. -func (k DeposedKey) Generation() Generation { - if k == NotDeposed { - return CurrentGen - } - return k -} - -// generation is an implementation of Generation. 
-func (k DeposedKey) generation() {} diff --git a/vendor/github.com/hashicorp/terraform/states/state.go b/vendor/github.com/hashicorp/terraform/states/state.go deleted file mode 100644 index 1f842359..00000000 --- a/vendor/github.com/hashicorp/terraform/states/state.go +++ /dev/null @@ -1,229 +0,0 @@ -package states - -import ( - "sort" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" -) - -// State is the top-level type of a Terraform state. -// -// A state should be mutated only via its accessor methods, to ensure that -// invariants are preserved. -// -// Access to State and the nested values within it is not concurrency-safe, -// so when accessing a State object concurrently it is the caller's -// responsibility to ensure that only one write is in progress at a time -// and that reads only occur when no write is in progress. The most common -// way to acheive this is to wrap the State in a SyncState and use the -// higher-level atomic operations supported by that type. -type State struct { - // Modules contains the state for each module. The keys in this map are - // an implementation detail and must not be used by outside callers. - Modules map[string]*Module -} - -// NewState constructs a minimal empty state, containing an empty root module. -func NewState() *State { - modules := map[string]*Module{} - modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) - return &State{ - Modules: modules, - } -} - -// BuildState is a helper -- primarily intended for tests -- to build a state -// using imperative code against the StateSync type while still acting as -// an expression of type *State to assign into a containing struct. -func BuildState(cb func(*SyncState)) *State { - s := NewState() - cb(s.SyncWrapper()) - return s -} - -// Empty returns true if there are no resources or populated output values -// in the receiver. In other words, if this state could be safely replaced -// with the return value of NewState and be functionally equivalent. -func (s *State) Empty() bool { - if s == nil { - return true - } - for _, ms := range s.Modules { - if len(ms.Resources) != 0 { - return false - } - if len(ms.OutputValues) != 0 { - return false - } - } - return true -} - -// Module returns the state for the module with the given address, or nil if -// the requested module is not tracked in the state. -func (s *State) Module(addr addrs.ModuleInstance) *Module { - if s == nil { - panic("State.Module on nil *State") - } - return s.Modules[addr.String()] -} - -// RemoveModule removes the module with the given address from the state, -// unless it is the root module. The root module cannot be deleted, and so -// this method will panic if that is attempted. -// -// Removing a module implicitly discards all of the resources, outputs and -// local values within it, and so this should usually be done only for empty -// modules. For callers accessing the state through a SyncState wrapper, modules -// are automatically pruned if they are empty after one of their contained -// elements is removed. -func (s *State) RemoveModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - panic("attempted to remove root module") - } - - delete(s.Modules, addr.String()) -} - -// RootModule is a convenient alias for Module(addrs.RootModuleInstance). 
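The deleted state.go above maintains the invariant that even a freshly constructed State contains a root module, and Empty reports whether the state is functionally identical to a fresh one. A simplified standalone sketch of those two behaviours (the empty-string key stands in for addrs.RootModuleInstance.String(); the types are illustrative, not the vendored API):

```go
package main

import "fmt"

// module and state are simplified stand-ins for the vendored types.
type module struct {
	Resources    map[string]string
	OutputValues map[string]string
}

type state struct {
	Modules map[string]*module
}

// newState keeps the invariant that even a fresh state contains a root module.
func newState() *state {
	return &state{Modules: map[string]*module{
		"": {Resources: map[string]string{}, OutputValues: map[string]string{}},
	}}
}

// empty reports whether the state could be replaced by newState() with no
// functional difference: no resources and no populated outputs in any module.
func (s *state) empty() bool {
	if s == nil {
		return true
	}
	for _, m := range s.Modules {
		if len(m.Resources) != 0 || len(m.OutputValues) != 0 {
			return false
		}
	}
	return true
}

func main() {
	s := newState()
	fmt.Println(s.empty()) // true
	s.Modules[""].Resources["alks_iamrole.test_role"] = "managed"
	fmt.Println(s.empty()) // false
}
```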
-func (s *State) RootModule() *Module { - if s == nil { - panic("RootModule called on nil State") - } - return s.Modules[addrs.RootModuleInstance.String()] -} - -// EnsureModule returns the state for the module with the given address, -// creating and adding a new one if necessary. -// -// Since this might modify the state to add a new instance, it is considered -// to be a write operation. -func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { - ms := s.Module(addr) - if ms == nil { - ms = NewModule(addr) - s.Modules[addr.String()] = ms - } - return ms -} - -// HasResources returns true if there is at least one resource (of any mode) -// present in the receiving state. -func (s *State) HasResources() bool { - if s == nil { - return false - } - for _, ms := range s.Modules { - if len(ms.Resources) > 0 { - return true - } - } - return false -} - -// Resource returns the state for the resource with the given address, or nil -// if no such resource is tracked in the state. -func (s *State) Resource(addr addrs.AbsResource) *Resource { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.Resource(addr.Resource) -} - -// ResourceInstance returns the state for the resource instance with the given -// address, or nil if no such resource is tracked in the state. -func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - if s == nil { - panic("State.ResourceInstance on nil *State") - } - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.ResourceInstance(addr.Resource) -} - -// OutputValue returns the state for the output value with the given address, -// or nil if no such output value is tracked in the state. -func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - ms := s.Module(addr.Module) - if ms == nil { - return nil - } - return ms.OutputValues[addr.OutputValue.Name] -} - -// LocalValue returns the value of the named local value with the given address, -// or cty.NilVal if no such value is tracked in the state. -func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { - ms := s.Module(addr.Module) - if ms == nil { - return cty.NilVal - } - return ms.LocalValues[addr.LocalValue.Name] -} - -// ProviderAddrs returns a list of all of the provider configuration addresses -// referenced throughout the receiving state. -// -// The result is de-duplicated so that each distinct address appears only once. -func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { - if s == nil { - return nil - } - - m := map[string]addrs.AbsProviderConfig{} - for _, ms := range s.Modules { - for _, rc := range ms.Resources { - m[rc.ProviderConfig.String()] = rc.ProviderConfig - } - } - if len(m) == 0 { - return nil - } - - // This is mainly just so we'll get stable results for testing purposes. - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - ret := make([]addrs.AbsProviderConfig, len(keys)) - for i, key := range keys { - ret[i] = m[key] - } - - return ret -} - -// PruneResourceHusks is a specialized method that will remove any Resource -// objects that do not contain any instances, even if they have an EachMode. -// -// This should generally be used only after a "terraform destroy" operation, -// to finalize the cleanup of the state. 
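ProviderAddrs above de-duplicates provider configuration addresses through a map and then sorts the keys so the result is stable for tests. The same map-then-sort pattern as a standalone sketch over plain strings (the sample addresses are illustrative):

```go
package main

import (
	"fmt"
	"sort"
)

// uniqueSorted collects distinct values via a map and returns them sorted,
// the pattern the deleted ProviderAddrs uses to produce a stable,
// de-duplicated list of provider configuration addresses.
func uniqueSorted(in []string) []string {
	seen := map[string]struct{}{}
	for _, s := range in {
		seen[s] = struct{}{}
	}
	out := make([]string, 0, len(seen))
	for s := range seen {
		out = append(out, s)
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(uniqueSorted([]string{
		`provider["registry.terraform.io/hashicorp/aws"]`,
		`provider["registry.terraform.io/hashicorp/aws"].nonprod`,
		`provider["registry.terraform.io/hashicorp/aws"]`,
	}))
}
```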
It is not correct to use this after -// other operations because if a resource has "count = 0" or "for_each" over -// an empty collection then we want to retain it in the state so that references -// to it, particularly in "strange" contexts like "terraform console", can be -// properly resolved. -// -// This method MUST NOT be called concurrently with other readers and writers -// of the receiving state. -func (s *State) PruneResourceHusks() { - for _, m := range s.Modules { - m.PruneResourceHusks() - if len(m.Resources) == 0 && !m.Addr.IsRoot() { - s.RemoveModule(m.Addr) - } - } -} - -// SyncWrapper returns a SyncState object wrapping the receiver. -func (s *State) SyncWrapper() *SyncState { - return &SyncState{ - state: s, - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go deleted file mode 100644 index 7d7a7ef1..00000000 --- a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go +++ /dev/null @@ -1,231 +0,0 @@ -package states - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// Taking deep copies of states is an important operation because state is -// otherwise a mutable data structure that is challenging to share across -// many separate callers. It is important that the DeepCopy implementations -// in this file comprehensively copy all parts of the state data structure -// that could be mutated via pointers. - -// DeepCopy returns a new state that contains equivalent data to the reciever -// but shares no backing memory in common. -// -// As with all methods on State, this method is not safe to use concurrently -// with writing to any portion of the recieving data structure. It is the -// caller's responsibility to ensure mutual exclusion for the duration of the -// operation, but may then freely modify the receiver and the returned copy -// independently once this method returns. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - modules := make(map[string]*Module, len(s.Modules)) - for k, m := range s.Modules { - modules[k] = m.DeepCopy() - } - return &State{ - Modules: modules, - } -} - -// DeepCopy returns a new module state that contains equivalent data to the -// receiver but shares no backing memory in common. -// -// As with all methods on Module, this method is not safe to use concurrently -// with writing to any portion of the recieving data structure. It is the -// caller's responsibility to ensure mutual exclusion for the duration of the -// operation, but may then freely modify the receiver and the returned copy -// independently once this method returns. -func (ms *Module) DeepCopy() *Module { - if ms == nil { - return nil - } - - resources := make(map[string]*Resource, len(ms.Resources)) - for k, r := range ms.Resources { - resources[k] = r.DeepCopy() - } - outputValues := make(map[string]*OutputValue, len(ms.OutputValues)) - for k, v := range ms.OutputValues { - outputValues[k] = v.DeepCopy() - } - localValues := make(map[string]cty.Value, len(ms.LocalValues)) - for k, v := range ms.LocalValues { - // cty.Value is immutable, so we don't need to copy these. - localValues[k] = v - } - - return &Module{ - Addr: ms.Addr, // technically mutable, but immutable by convention - Resources: resources, - OutputValues: outputValues, - LocalValues: localValues, - } -} - -// DeepCopy returns a new resource state that contains equivalent data to the -// receiver but shares no backing memory in common. 
-// -// As with all methods on Resource, this method is not safe to use concurrently -// with writing to any portion of the recieving data structure. It is the -// caller's responsibility to ensure mutual exclusion for the duration of the -// operation, but may then freely modify the receiver and the returned copy -// independently once this method returns. -func (rs *Resource) DeepCopy() *Resource { - if rs == nil { - return nil - } - - instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances)) - for k, i := range rs.Instances { - instances[k] = i.DeepCopy() - } - - return &Resource{ - Addr: rs.Addr, - EachMode: rs.EachMode, - Instances: instances, - ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention - } -} - -// DeepCopy returns a new resource instance state that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on ResourceInstance, this method is not safe to use -// concurrently with writing to any portion of the recieving data structure. It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (is *ResourceInstance) DeepCopy() *ResourceInstance { - if is == nil { - return nil - } - - deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed)) - for k, obj := range is.Deposed { - deposed[k] = obj.DeepCopy() - } - - return &ResourceInstance{ - Current: is.Current.DeepCopy(), - Deposed: deposed, - } -} - -// DeepCopy returns a new resource instance object that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on ResourceInstanceObjectSrc, this method is not safe to -// use concurrently with writing to any portion of the recieving data structure. -// It is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { - if obj == nil { - return nil - } - - var attrsFlat map[string]string - if obj.AttrsFlat != nil { - attrsFlat = make(map[string]string, len(obj.AttrsFlat)) - for k, v := range obj.AttrsFlat { - attrsFlat[k] = v - } - } - - var attrsJSON []byte - if obj.AttrsJSON != nil { - attrsJSON = make([]byte, len(obj.AttrsJSON)) - copy(attrsJSON, obj.AttrsJSON) - } - - var private []byte - if obj.Private != nil { - private = make([]byte, len(obj.Private)) - copy(private, obj.Private) - } - - // Some addrs.Referencable implementations are technically mutable, but - // we treat them as immutable by convention and so we don't deep-copy here. 
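The DeepCopy methods above re-allocate every map and byte slice so the copy shares no backing memory with the receiver, while leaving nil fields nil so the "unset" case round-trips. A standalone sketch of that convention for a trimmed-down attributes object (the type is a simplified stand-in, not the vendored ResourceInstanceObjectSrc):

```go
package main

import "fmt"

type objectSrc struct {
	AttrsFlat map[string]string
	AttrsJSON []byte
}

// deepCopy returns a copy that shares no backing memory: maps and slices are
// reallocated element by element, and nil stays nil.
func (o *objectSrc) deepCopy() *objectSrc {
	if o == nil {
		return nil
	}
	var flat map[string]string
	if o.AttrsFlat != nil {
		flat = make(map[string]string, len(o.AttrsFlat))
		for k, v := range o.AttrsFlat {
			flat[k] = v
		}
	}
	var js []byte
	if o.AttrsJSON != nil {
		js = make([]byte, len(o.AttrsJSON))
		copy(js, o.AttrsJSON)
	}
	return &objectSrc{AttrsFlat: flat, AttrsJSON: js}
}

func main() {
	a := &objectSrc{AttrsFlat: map[string]string{"id": "i-123"}, AttrsJSON: []byte(`{"id":"i-123"}`)}
	b := a.deepCopy()
	b.AttrsFlat["id"] = "i-456"
	fmt.Println(a.AttrsFlat["id"], b.AttrsFlat["id"]) // i-123 i-456
}
```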
- var dependencies []addrs.AbsResource - if obj.Dependencies != nil { - dependencies = make([]addrs.AbsResource, len(obj.Dependencies)) - copy(dependencies, obj.Dependencies) - } - - var dependsOn []addrs.Referenceable - if obj.DependsOn != nil { - dependsOn = make([]addrs.Referenceable, len(obj.DependsOn)) - copy(dependsOn, obj.DependsOn) - } - - return &ResourceInstanceObjectSrc{ - Status: obj.Status, - SchemaVersion: obj.SchemaVersion, - Private: private, - AttrsFlat: attrsFlat, - AttrsJSON: attrsJSON, - Dependencies: dependencies, - DependsOn: dependsOn, - } -} - -// DeepCopy returns a new resource instance object that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on ResourceInstanceObject, this method is not safe to use -// concurrently with writing to any portion of the recieving data structure. It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { - if obj == nil { - return nil - } - - var private []byte - if obj.Private != nil { - private = make([]byte, len(obj.Private)) - copy(private, obj.Private) - } - - // Some addrs.Referenceable implementations are technically mutable, but - // we treat them as immutable by convention and so we don't deep-copy here. - var dependencies []addrs.AbsResource - if obj.Dependencies != nil { - dependencies = make([]addrs.AbsResource, len(obj.Dependencies)) - copy(dependencies, obj.Dependencies) - } - - return &ResourceInstanceObject{ - Value: obj.Value, - Status: obj.Status, - Private: private, - Dependencies: dependencies, - } -} - -// DeepCopy returns a new output value state that contains equivalent data -// to the receiver but shares no backing memory in common. -// -// As with all methods on OutputValue, this method is not safe to use -// concurrently with writing to any portion of the recieving data structure. It -// is the caller's responsibility to ensure mutual exclusion for the duration -// of the operation, but may then freely modify the receiver and the returned -// copy independently once this method returns. -func (os *OutputValue) DeepCopy() *OutputValue { - if os == nil { - return nil - } - - return &OutputValue{ - Value: os.Value, - Sensitive: os.Sensitive, - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/state_equal.go b/vendor/github.com/hashicorp/terraform/states/state_equal.go deleted file mode 100644 index ea20967e..00000000 --- a/vendor/github.com/hashicorp/terraform/states/state_equal.go +++ /dev/null @@ -1,18 +0,0 @@ -package states - -import ( - "reflect" -) - -// Equal returns true if the receiver is functionally equivalent to other, -// including any ephemeral portions of the state that would not be included -// if the state were saved to files. -// -// To test only the persistent portions of two states for equality, instead -// use statefile.StatesMarshalEqual. -func (s *State) Equal(other *State) bool { - // For the moment this is sufficient, but we may need to do something - // more elaborate in future if we have any portions of state that require - // more sophisticated comparisons. 
- return reflect.DeepEqual(s, other) -} diff --git a/vendor/github.com/hashicorp/terraform/states/state_string.go b/vendor/github.com/hashicorp/terraform/states/state_string.go deleted file mode 100644 index 8be3d01a..00000000 --- a/vendor/github.com/hashicorp/terraform/states/state_string.go +++ /dev/null @@ -1,279 +0,0 @@ -package states - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// String returns a rather-odd string representation of the entire state. -// -// This is intended to match the behavior of the older terraform.State.String -// method that is used in lots of existing tests. It should not be used in -// new tests: instead, use "cmp" to directly compare the state data structures -// and print out a diff if they do not match. -// -// This method should never be used in non-test code, whether directly by call -// or indirectly via a %s or %q verb in package fmt. -func (s *State) String() string { - if s == nil { - return "" - } - - // sort the modules by name for consistent output - modules := make([]string, 0, len(s.Modules)) - for m := range s.Modules { - modules = append(modules, m) - } - sort.Strings(modules) - - var buf bytes.Buffer - for _, name := range modules { - m := s.Modules[name] - mStr := m.testString() - - // If we're the root module, we just write the output directly. - if m.Addr.IsRoot() { - buf.WriteString(mStr + "\n") - continue - } - - // We need to build out a string that resembles the not-quite-standard - // format that terraform.State.String used to use, where there's a - // "module." prefix but then just a chain of all of the module names - // without any further "module." portions. - buf.WriteString("module") - for _, step := range m.Addr { - buf.WriteByte('.') - buf.WriteString(step.Name) - if step.InstanceKey != addrs.NoKey { - buf.WriteByte('[') - buf.WriteString(step.InstanceKey.String()) - buf.WriteByte(']') - } - } - buf.WriteString(":\n") - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// testString is used to produce part of the output of State.String. It should -// never be used directly. -func (m *Module) testString() string { - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - // We use AbsResourceInstance here, even though everything belongs to - // the same module, just because we have a sorting behavior defined - // for those but not for just ResourceInstance. - addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) - for _, rs := range m.Resources { - for ik := range rs.Instances { - addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance)) - } - } - - sort.Slice(addrsOrder, func(i, j int) bool { - return addrsOrder[i].Less(addrsOrder[j]) - }) - - for _, fakeAbsAddr := range addrsOrder { - addr := fakeAbsAddr.Resource - rs := m.Resource(addr.ContainingResource()) - is := m.ResourceInstance(addr) - - // Here we need to fake up a legacy-style address as the old state - // types would've used, since that's what our tests against those - // old types expect. The significant difference is that instancekey - // is dot-separated rather than using index brackets. 
- k := addr.ContainingResource().String() - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - k = fmt.Sprintf("%s.%d", k, tk) - default: - // No other key types existed for the legacy types, so we - // can do whatever we want here. We'll just use our standard - // syntax for these. - k = k + tk.String() - } - } - - id := LegacyInstanceObjectID(is.Current) - - taintStr := "" - if is.Current != nil && is.Current.Status == ObjectTainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(is.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) - - // Attributes were a flatmap before, but are not anymore. To preserve - // our old output as closely as possible we need to do a conversion - // to flatmap. Normally we'd want to do this with schema for - // accuracy, but for our purposes here it only needs to be approximate. - // This should produce an identical result for most cases, though - // in particular will differ in a few cases: - // - The keys used for elements in a set will be different - // - Values for attributes of type cty.DynamicPseudoType will be - // misinterpreted (but these weren't possible in old world anyway) - var attributes map[string]string - if obj := is.Current; obj != nil { - switch { - case obj.AttrsFlat != nil: - // Easy (but increasingly unlikely) case: the state hasn't - // actually been upgraded to the new form yet. - attributes = obj.AttrsFlat - case obj.AttrsJSON != nil: - ty, err := ctyjson.ImpliedType(obj.AttrsJSON) - if err == nil { - val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) - if err == nil { - attributes = hcl2shim.FlatmapValueFromHCL2(val) - } - } - } - } - attrKeys := make([]string, 0, len(attributes)) - for ak, val := range attributes { - if ak == "id" { - continue - } - - // don't show empty containers in the output - if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { - continue - } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - // CAUTION: Since deposed keys are now random strings instead of - // incrementing integers, this result will not be deterministic - // if there is more than one deposed object. 
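The test-only String output above sorts attribute keys, prints the id on its own line, and hides ".#"/".%" count keys whose value is "0" (empty containers). A standalone sketch of that filtering and ordering over a plain flatmap (inputs are illustrative):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// renderAttrs prints flatmap attributes the way the legacy test output does:
// keys sorted, "id" skipped (it is printed separately), and zero-valued
// ".#"/".%" count keys for empty containers suppressed.
func renderAttrs(attrs map[string]string) string {
	keys := make([]string, 0, len(attrs))
	for k, v := range attrs {
		if k == "id" {
			continue
		}
		if v == "0" && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	for _, k := range keys {
		fmt.Fprintf(&b, "  %s = %s\n", k, attrs[k])
	}
	return b.String()
}

func main() {
	fmt.Print(renderAttrs(map[string]string{
		"id":        "i-0abc",
		"tags.%":    "0",
		"ami":       "ami-123",
		"subnet_id": "subnet-456",
	}))
}
```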
- i := 1 - for _, t := range is.Deposed { - id := LegacyInstanceObjectID(t) - taintStr := "" - if t.Status == ObjectTainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) - i++ - } - - if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range obj.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) - } - } - } - - if len(m.OutputValues) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - v := m.OutputValues[k] - lv := hcl2shim.ConfigValueFromHCL2(v.Value) - switch vTyped := lv.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - default: - buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) - } - } - } - - return buf.String() -} - -// LegacyInstanceObjectID is a helper for extracting an object id value from -// an instance object in a way that approximates how we used to do this -// for the old state types. ID is no longer first-class, so this is preserved -// only for compatibility with old tests that include the id as part of their -// expected value. -func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { - if obj == nil { - return "" - } - - if obj.AttrsJSON != nil { - type WithID struct { - ID string `json:"id"` - } - var withID WithID - err := json.Unmarshal(obj.AttrsJSON, &withID) - if err == nil { - return withID.ID - } - } else if obj.AttrsFlat != nil { - if flatID, exists := obj.AttrsFlat["id"]; exists { - return flatID - } - } - - // For resource types created after we removed id as special there may - // not actually be one at all. This is okay because older tests won't - // encounter this, and new tests shouldn't be using ids. - return "" -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go deleted file mode 100644 index a6d88ecd..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/tfdiags" -) - -const invalidFormat = "Invalid state file format" - -// jsonUnmarshalDiags is a helper that translates errors returned from -// json.Unmarshal into hopefully-more-helpful diagnostics messages. -func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - if err == nil { - return diags - } - - switch tErr := err.(type) { - case *json.SyntaxError: - // We've usually already successfully parsed a source file as JSON at - // least once before we'd use jsonUnmarshalDiags with it (to sniff - // the version number) so this particular error should not appear much - // in practice. 
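LegacyInstanceObjectID above recovers an object id from either the JSON attributes or the older flatmap attributes, returning an empty string when neither carries one. A standalone sketch with the same fallback order (function name and inputs are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// legacyID extracts the "id" attribute from JSON attributes when present,
// falls back to the legacy flatmap, and returns "" if neither has an id.
func legacyID(attrsJSON []byte, attrsFlat map[string]string) string {
	if attrsJSON != nil {
		var withID struct {
			ID string `json:"id"`
		}
		if err := json.Unmarshal(attrsJSON, &withID); err == nil {
			return withID.ID
		}
	}
	if id, ok := attrsFlat["id"]; ok {
		return id
	}
	return ""
}

func main() {
	fmt.Println(legacyID([]byte(`{"id":"i-0abc","ami":"ami-123"}`), nil)) // i-0abc
	fmt.Println(legacyID(nil, map[string]string{"id": "i-0def"}))         // i-0def
}
```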
- diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - // This is likely to be the most common area, describing a - // non-conformance between the file and the expected file format - // at a semantic level. - if tErr.Field != "" { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), - )) - break - } else { - // Without a field name, we can't really say anything helpful. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - "The state file does not conform to the expected JSON data structure.", - )) - } - default: - // Fallback for all other types of errors. This can happen only for - // custom UnmarshalJSON implementations, so should be encountered - // only rarely. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - invalidFormat, - fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), - )) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go deleted file mode 100644 index 625d0cf4..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package statefile deals with the file format used to serialize states for -// persistent storage and then deserialize them into memory again later. -package statefile diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/file.go b/vendor/github.com/hashicorp/terraform/states/statefile/file.go deleted file mode 100644 index 6e202401..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/file.go +++ /dev/null @@ -1,62 +0,0 @@ -package statefile - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/states" - tfversion "github.com/hashicorp/terraform/version" -) - -// File is the in-memory representation of a state file. It includes the state -// itself along with various metadata used to track changing state files for -// the same configuration over time. -type File struct { - // TerraformVersion is the version of Terraform that wrote this state file. - TerraformVersion *version.Version - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial uint64 - - // Lineage is set when a new, blank state file is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string - - // State is the actual state represented by this file. - State *states.State -} - -func New(state *states.State, lineage string, serial uint64) *File { - // To make life easier on callers, we'll accept a nil state here and just - // allocate an empty one, which is required for this file to be successfully - // written out. 
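The diagnostics helper above turns raw json.Unmarshal failures into friendlier messages by type-switching on *json.SyntaxError and *json.UnmarshalTypeError. A standalone sketch of that translation, returning a plain string instead of tfdiags diagnostics:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// friendlyJSONError rewrites a json.Unmarshal error in the style used above,
// distinguishing syntax errors from semantic type mismatches.
func friendlyJSONError(err error) string {
	if err == nil {
		return ""
	}
	switch tErr := err.(type) {
	case *json.SyntaxError:
		return fmt.Sprintf("could not be parsed as JSON: syntax error at byte offset %d", tErr.Offset)
	case *json.UnmarshalTypeError:
		return fmt.Sprintf("field %q has an invalid value of JSON type %s", tErr.Field, tErr.Value)
	default:
		return fmt.Sprintf("does not conform to the expected JSON data structure: %s", err)
	}
}

func main() {
	var v struct {
		Version int `json:"version"`
	}
	err := json.Unmarshal([]byte(`{"version": "four"}`), &v)
	fmt.Println(friendlyJSONError(err))
}
```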
- if state == nil { - state = states.NewState() - } - - return &File{ - TerraformVersion: tfversion.SemVer, - State: state, - Lineage: lineage, - Serial: serial, - } -} - -// DeepCopy is a convenience method to create a new File object whose state -// is a deep copy of the receiver's, as implemented by states.State.DeepCopy. -func (f *File) DeepCopy() *File { - if f == nil { - return nil - } - return &File{ - TerraformVersion: f.TerraformVersion, - Serial: f.Serial, - Lineage: f.Lineage, - State: f.State.DeepCopy(), - } -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go deleted file mode 100644 index 4948b39b..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go +++ /dev/null @@ -1,40 +0,0 @@ -package statefile - -import ( - "bytes" - - "github.com/hashicorp/terraform/states" -) - -// StatesMarshalEqual returns true if and only if the two given states have -// an identical (byte-for-byte) statefile representation. -// -// This function compares only the portions of the state that are persisted -// in state files, so for example it will not return false if the only -// differences between the two states are local values or descendent module -// outputs. -func StatesMarshalEqual(a, b *states.State) bool { - var aBuf bytes.Buffer - var bBuf bytes.Buffer - - // nil states are not valid states, and so they can never martial equal. - if a == nil || b == nil { - return false - } - - // We write here some temporary files that have no header information - // populated, thus ensuring that we're only comparing the state itself - // and not any metadata. - err := Write(&File{State: a}, &aBuf) - if err != nil { - // Should never happen, because we're writing to an in-memory buffer - panic(err) - } - err = Write(&File{State: b}, &bBuf) - if err != nil { - // Should never happen, because we're writing to an in-memory buffer - panic(err) - } - - return bytes.Equal(aBuf.Bytes(), bBuf.Bytes()) -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/read.go b/vendor/github.com/hashicorp/terraform/states/statefile/read.go deleted file mode 100644 index d691c029..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/read.go +++ /dev/null @@ -1,209 +0,0 @@ -package statefile - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/tfdiags" - tfversion "github.com/hashicorp/terraform/version" -) - -// ErrNoState is returned by ReadState when the state file is empty. -var ErrNoState = errors.New("no state") - -// Read reads a state from the given reader. -// -// Legacy state format versions 1 through 3 are supported, but the result will -// contain object attributes in the deprecated "flatmap" format and so must -// be upgraded by the caller before use. -// -// If the state file is empty, the special error value ErrNoState is returned. -// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics -// potentially describing multiple errors. -func Read(r io.Reader) (*File, error) { - // Some callers provide us a "typed nil" *os.File here, which would - // cause us to panic below if we tried to use it. 
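StatesMarshalEqual above compares two states by writing both through the same serializer and byte-comparing the buffers, so only the portions that survive serialization participate in the comparison. A standalone sketch of the idea using encoding/json in place of the statefile writer (the state type here is a simplified stand-in):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type state struct {
	Outputs map[string]string `json:"outputs"`
}

// marshalEqual reports whether two states serialize to identical bytes.
// Nil states are never considered equal, matching the behaviour above.
func marshalEqual(a, b *state) bool {
	if a == nil || b == nil {
		return false
	}
	aBuf, err := json.Marshal(a)
	if err != nil {
		panic(err) // marshalling a plain in-memory struct should not fail
	}
	bBuf, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	return bytes.Equal(aBuf, bBuf)
}

func main() {
	x := &state{Outputs: map[string]string{"vpc_id": "vpc-123"}}
	y := &state{Outputs: map[string]string{"vpc_id": "vpc-123"}}
	fmt.Println(marshalEqual(x, y)) // true
}
```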
- if f, ok := r.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - var diags tfdiags.Diagnostics - - // We actually just buffer the whole thing in memory, because states are - // generally not huge and we need to do be able to sniff for a version - // number before full parsing. - src, err := ioutil.ReadAll(r) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to read state file", - fmt.Sprintf("The state file could not be read: %s", err), - )) - return nil, diags.Err() - } - - if len(src) == 0 { - return nil, ErrNoState - } - - state, diags := readState(src) - if diags.HasErrors() { - return nil, diags.Err() - } - - if state == nil { - // Should never happen - panic("readState returned nil state with no errors") - } - - if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { - return state, fmt.Errorf( - "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", - state.TerraformVersion, - tfversion.SemVer, - state.TerraformVersion, - ) - } - - return state, diags.Err() -} - -func readState(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - if looksLikeVersion0(src) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", - )) - return nil, diags - } - - version, versionDiags := sniffJSONStateVersion(src) - diags = diags.Append(versionDiags) - if versionDiags.HasErrors() { - return nil, diags - } - - switch version { - case 0: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", - )) - return nil, diags - case 1: - return readStateV1(src) - case 2: - return readStateV2(src) - case 3: - return readStateV3(src) - case 4: - return readStateV4(src) - default: - thisVersion := tfversion.SemVer.String() - creatingVersion := sniffJSONStateTerraformVersion(src) - switch { - case creatingVersion != "": - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. 
This state file may have been created by a newer version of Terraform.", version, thisVersion), - )) - } - return nil, diags - } -} - -func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - type VersionSniff struct { - Version *uint64 `json:"version"` - } - var sniff VersionSniff - err := json.Unmarshal(src, &sniff) - if err != nil { - switch tErr := err.(type) { - case *json.SyntaxError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), - )) - case *json.UnmarshalTypeError: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file could not be parsed as JSON.", - )) - } - } - - if sniff.Version == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - unsupportedFormat, - "The state file does not have a \"version\" attribute, which is required to identify the format version.", - )) - return 0, diags - } - - return *sniff.Version, diags -} - -// sniffJSONStateTerraformVersion attempts to sniff the Terraform version -// specification from the given state file source code. The result is either -// a version string or an empty string if no version number could be extracted. -// -// This is a best-effort function intended to produce nicer error messages. It -// should not be used for any real processing. -func sniffJSONStateTerraformVersion(src []byte) string { - type VersionSniff struct { - Version string `json:"terraform_version"` - } - var sniff VersionSniff - - err := json.Unmarshal(src, &sniff) - if err != nil { - return "" - } - - // Attempt to parse the string as a version so we won't report garbage - // as a version number. - _, err = version.NewVersion(sniff.Version) - if err != nil { - return "" - } - - return sniff.Version -} - -// unsupportedFormat is a diagnostic summary message for when the state file -// seems to not be a state file at all, or is not a supported version. -// -// Use invalidFormat instead for the subtly-different case of "this looks like -// it's intended to be a state file but it's not structured correctly". -const unsupportedFormat = "Unsupported state file format" - -const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go deleted file mode 100644 index 9b533317..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go +++ /dev/null @@ -1,23 +0,0 @@ -package statefile - -// looksLikeVersion0 sniffs for the signature indicating a version 0 state -// file. -// -// Version 0 was the number retroactively assigned to Terraform's initial -// (unversioned) binary state file format, which was later superseded by the -// version 1 format in JSON. -// -// Version 0 is no longer supported, so this is used only to detect it and -// return a nice error to the user. -func looksLikeVersion0(src []byte) bool { - // Version 0 files begin with the magic prefix "tfstate". 
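readState above sniffs only the "version" attribute out of the raw bytes before deciding which format reader to dispatch to. A standalone sketch of that sniff step (error text and function name are illustrative):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// sniffVersion decodes only the "version" attribute so the caller can pick
// the right format reader before fully parsing the document.
func sniffVersion(src []byte) (uint64, error) {
	var sniff struct {
		Version *uint64 `json:"version"`
	}
	if err := json.Unmarshal(src, &sniff); err != nil {
		return 0, fmt.Errorf("state file could not be parsed as JSON: %w", err)
	}
	if sniff.Version == nil {
		return 0, errors.New(`state file has no "version" attribute`)
	}
	return *sniff.Version, nil
}

func main() {
	v, err := sniffVersion([]byte(`{"version": 4, "terraform_version": "0.14.0"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println("state format version:", v) // 4
}
```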
- const magic = "tfstate" - if len(src) < len(magic) { - // Not even long enough to have the magic prefix - return false - } - if string(src[0:len(magic)]) == magic { - return true - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go deleted file mode 100644 index 80d711bc..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go +++ /dev/null @@ -1,174 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/tfdiags" -) - -func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV1 := &stateV1{} - err := json.Unmarshal(src, sV1) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV1(sV1) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2, err := upgradeStateV1ToV2(sV1) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV1 is a representation of the legacy JSON state format version 1. -// -// It is only used to read version 1 JSON files prior to upgrading them to -// the current format. -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. 
If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is only at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. - Meta map[string]string `json:"meta,omitempty"` -} - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. 
For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go deleted file mode 100644 index 0b417e1c..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go +++ /dev/null @@ -1,172 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { - log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*moduleStateV2, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &stateV2{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &remoteStateV2{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root. 
- path = []string{"root"} - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*outputStateV2) - for key, output := range old.Outputs { - outputs[key] = &outputStateV2{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*resourceStateV2) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &moduleStateV2{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*instanceStateV2, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &resourceStateV2{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &instanceStateV2{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Meta: newMeta, - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go deleted file mode 100644 index be93924a..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go +++ /dev/null @@ -1,209 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/hashicorp/terraform/tfdiags" -) - -func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV2 := &stateV2{} - err := json.Unmarshal(src, sV2) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV2(sV2) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3, err := upgradeStateV2ToV3(sV2) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV3(sV3) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV2 is a 
representation of the legacy JSON state format version 2. -// -// It is only used to read version 2 JSON files prior to upgrading them to -// the current format. -type stateV2 struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. - TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV2 `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state. This is used to track any changes in the backend - // configuration. - Backend *backendStateV2 `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV2 `json:"modules"` -} - -type remoteStateV2 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type outputStateV2 struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` - - mu sync.Mutex -} - -type moduleStateV2 struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Locals are kept only transiently in-memory, because we can always - // re-compute them. - Locals map[string]interface{} `json:"-"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*outputStateV2 `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV2 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. 
If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` -} - -type resourceStateV2 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *instanceStateV2 `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*instanceStateV2 `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` - - mu sync.Mutex -} - -type instanceStateV2 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - // Tainted is used to mark a resource for recreation. 
- Tainted bool `json:"tainted"` -} - -type backendStateV2 struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go deleted file mode 100644 index 2d03c07c..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go +++ /dev/null @@ -1,145 +0,0 @@ -package statefile - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" -) - -func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { - if old == nil { - return (*stateV3)(nil), nil - } - - var new *stateV3 - { - copy, err := copystructure.Config{Lock: true}.Copy(old) - if err != nil { - panic(err) - } - newWrongType := copy.(*stateV2) - newRightType := (stateV3)(*newWrongType) - new = &newRightType - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. 
- - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. - if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go deleted file mode 100644 index ab6414b0..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go +++ /dev/null @@ -1,50 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/tfdiags" -) - -func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV3 := &stateV3{} - err := json.Unmarshal(src, sV3) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV3(sV3) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4, err := upgradeStateV3ToV4(sV3) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - upgradeFailed, - fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), - )) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -// stateV2 is a representation of the legacy JSON state format version 3. -// -// It is only used to read version 3 JSON files prior to upgrading them to -// the current format. -// -// The differences between version 2 and version 3 are only in the data and -// not in the structure, so stateV3 actually shares the same structs as -// stateV2. Type stateV3 represents that the data within is formatted as -// expected by the V3 format, rather than the V2 format. 
-type stateV3 stateV2 diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go deleted file mode 100644 index 4e38aca3..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go +++ /dev/null @@ -1,488 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { - - if old.Serial < 0 { - // The new format is using uint64 here, which should be fine for any - // real state (we only used positive integers in practice) but we'll - // catch this explicitly here to avoid weird behavior if a state file - // has been tampered with in some way. - return nil, fmt.Errorf("state has serial less than zero, which is invalid") - } - - new := &stateV4{ - TerraformVersion: old.TFVersion, - Serial: uint64(old.Serial), - Lineage: old.Lineage, - RootOutputs: map[string]outputStateV4{}, - Resources: []resourceStateV4{}, - } - - if new.TerraformVersion == "" { - // Older formats considered this to be optional, but now it's required - // and so we'll stub it out with something that's definitely older - // than the version that really created this state. - new.TerraformVersion = "0.0.0" - } - - for _, msOld := range old.Modules { - if len(msOld.Path) < 1 || msOld.Path[0] != "root" { - return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) - } - - // Convert legacy-style module address into our newer address type. - // Since these old formats are only generated by versions of Terraform - // that don't support count and for_each on modules, we can just assume - // all of the modules are unkeyed. - moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) - for i, name := range msOld.Path[1:] { - if !hclsyntax.ValidIdentifier(name) { - // If we don't fail here then we'll produce an invalid state - // version 4 which subsequent operations will reject, so we'll - // fail early here for safety to make sure we can never - // inadvertently commit an invalid snapshot to a backend. - return nil, fmt.Errorf("state contains invalid module path %#v: %q is not a valid identifier; rename it in Terraform 0.11 before upgrading to Terraform 0.12", msOld.Path, name) - } - moduleAddr[i] = addrs.ModuleInstanceStep{ - Name: name, - InstanceKey: addrs.NoKey, - } - } - - // In a v3 state file, a "resource state" is actually an instance - // state, so we need to fill in a missing level of heirarchy here - // by lazily creating resource states as we encounter them. - // We'll track them in here, keyed on the string representation of - // the resource address. 
- resourceStates := map[string]*resourceStateV4{} - - for legacyAddr, rsOld := range msOld.Resources { - instAddr, err := parseLegacyResourceAddress(legacyAddr) - if err != nil { - return nil, err - } - - resAddr := instAddr.Resource - rs, exists := resourceStates[resAddr.String()] - if !exists { - var modeStr string - switch resAddr.Mode { - case addrs.ManagedResourceMode: - modeStr = "managed" - case addrs.DataResourceMode: - modeStr = "data" - default: - return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) - } - - // In state versions prior to 4 we allowed each instance of a - // resource to have its own provider configuration address, - // which makes no real sense in practice because providers - // are associated with resources in the configuration. We - // elevate that to the resource level during this upgrade, - // implicitly taking the provider address of the first instance - // we encounter for each resource. While this is lossy in - // theory, in practice there is no reason for these values to - // differ between instances. - var providerAddr addrs.AbsProviderConfig - oldProviderAddr := rsOld.Provider - if strings.Contains(oldProviderAddr, "provider.") { - // Smells like a new-style provider address, but we'll test it. - var diags tfdiags.Diagnostics - providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) - if diags.HasErrors() { - if strings.Contains(oldProviderAddr, "${") { - // There seems to be a common misconception that - // interpolation was valid in provider aliases - // in 0.11, so we'll use a specialized error - // message for that case. - return nil, fmt.Errorf("invalid provider config reference %q for %s: this alias seems to contain a template interpolation sequence, which was not supported but also not error-checked in Terraform 0.11. To proceed, rename the associated provider alias to a valid identifier and apply the change with Terraform 0.11 before upgrading to Terraform 0.12", oldProviderAddr, instAddr) - } - return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - } else { - // Smells like an old-style module-local provider address, - // which we'll need to migrate. We'll assume it's referring - // to the same module the resource is in, which might be - // incorrect but it'll get fixed up next time any updates - // are made to an instance. - if oldProviderAddr != "" { - localAddr, diags := configs.ParseProviderConfigCompactStr(oldProviderAddr) - if diags.HasErrors() { - if strings.Contains(oldProviderAddr, "${") { - // There seems to be a common misconception that - // interpolation was valid in provider aliases - // in 0.11, so we'll use a specialized error - // message for that case. - return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: this alias seems to contain a template interpolation sequence, which was not supported but also not error-checked in Terraform 0.11. 
To proceed, rename the associated provider alias to a valid identifier and apply the change with Terraform 0.11 before upgrading to Terraform 0.12", oldProviderAddr, instAddr) - } - return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) - } - providerAddr = localAddr.Absolute(moduleAddr) - } else { - providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) - } - } - - rs = &resourceStateV4{ - Module: moduleAddr.String(), - Mode: modeStr, - Type: resAddr.Type, - Name: resAddr.Name, - Instances: []instanceObjectStateV4{}, - ProviderConfig: providerAddr.String(), - } - resourceStates[resAddr.String()] = rs - } - - // Now we'll deal with the instance itself, which may either be - // the first instance in a resource we just created or an additional - // instance for a resource added on a prior loop. - instKey := instAddr.Key - if isOld := rsOld.Primary; isOld != nil { - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) - if err != nil { - return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - for i, isOld := range rsOld.Deposed { - // When we migrate old instances we'll use sequential deposed - // keys just so that the upgrade result is deterministic. New - // deposed keys allocated moving forward will be pseudorandomly - // selected, but we check for collisions and so these - // non-random ones won't hurt. - deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) - isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) - if err != nil { - return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) - } - rs.Instances = append(rs.Instances, *isNew) - } - - if instKey != addrs.NoKey && rs.EachMode == "" { - rs.EachMode = "list" - } - } - - for _, rs := range resourceStates { - new.Resources = append(new.Resources, *rs) - } - - if len(msOld.Path) == 1 && msOld.Path[0] == "root" { - // We'll migrate the outputs for this module too, then. - for name, oldOS := range msOld.Outputs { - newOS := outputStateV4{ - Sensitive: oldOS.Sensitive, - } - - valRaw := oldOS.Value - valSrc, err := json.Marshal(valRaw) - if err != nil { - // Should never happen, because this value came from JSON - // in the first place and so we're just round-tripping here. - return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) - } - - // The "type" field in state V2 wasn't really that useful - // since it was only able to capture string vs. list vs. map. - // For this reason, during upgrade we'll just discard it - // altogether and use cty's idea of the implied type of - // turning our old value into JSON. - ty, err := ctyjson.ImpliedType(valSrc) - if err != nil { - // REALLY should never happen, because we literally just - // encoded this as JSON above! - return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) - } - - // ImpliedType tends to produce structural types, but since older - // version of Terraform didn't support those a collection type - // is probably what was intended, so we'll see if we can - // interpret our value as one. 
- ty = simplifyImpliedValueType(ty) - - tySrc, err := ctyjson.MarshalType(ty) - if err != nil { - return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) - } - - newOS.ValueRaw = json.RawMessage(valSrc) - newOS.ValueTypeRaw = json.RawMessage(tySrc) - - new.RootOutputs[name] = newOS - } - } - } - - new.normalize() - - return new, nil -} - -func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { - - // Schema versions were, in prior formats, a private concern of the provider - // SDK, and not a first-class concept in the state format. Here we're - // sniffing for the pre-0.12 SDK's way of representing schema versions - // and promoting it to our first-class field if we find it. We'll ignore - // it if it doesn't look like what the SDK would've written. If this - // sniffing fails then we'll assume schema version 0. - var schemaVersion uint64 - migratedSchemaVersion := false - if raw, exists := isOld.Meta["schema_version"]; exists { - switch tv := raw.(type) { - case string: - v, err := strconv.ParseUint(tv, 10, 64) - if err == nil { - schemaVersion = v - migratedSchemaVersion = true - } - case int: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - case float64: - schemaVersion = uint64(tv) - migratedSchemaVersion = true - } - } - - private := map[string]interface{}{} - for k, v := range isOld.Meta { - if k == "schema_version" && migratedSchemaVersion { - // We're gonna promote this into our first-class schema version field - continue - } - private[k] = v - } - var privateJSON []byte - if len(private) != 0 { - var err error - privateJSON, err = json.Marshal(private) - if err != nil { - // This shouldn't happen, because the Meta values all came from JSON - // originally anyway. - return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) - } - } - - var status string - if isOld.Tainted { - status = "tainted" - } - - var instKeyRaw interface{} - switch tk := instKey.(type) { - case addrs.IntKey: - instKeyRaw = int(tk) - case addrs.StringKey: - instKeyRaw = string(tk) - default: - if instKeyRaw != nil { - return nil, fmt.Errorf("unsupported instance key: %#v", instKey) - } - } - - var attributes map[string]string - if isOld.Attributes != nil { - attributes = make(map[string]string, len(isOld.Attributes)) - for k, v := range isOld.Attributes { - attributes[k] = v - } - } - if isOld.ID != "" { - // As a special case, if we don't already have an "id" attribute and - // yet there's a non-empty first-class ID on the old object then we'll - // create a synthetic id attribute to avoid losing that first-class id. - // In practice this generally arises only in tests where state literals - // are hand-written in a non-standard way; real code prior to 0.12 - // would always force the first-class ID to be copied into the - // id attribute before storing. - if attributes == nil { - attributes = make(map[string]string, len(isOld.Attributes)) - } - if idVal := attributes["id"]; idVal == "" { - attributes["id"] = isOld.ID - } - } - - dependencies := make([]string, 0, len(rsOld.Dependencies)) - for _, v := range rsOld.Dependencies { - depStr, err := parseLegacyDependency(v) - if err != nil { - // We just drop invalid dependencies on the floor here, because - // they tend to get left behind in Terraform 0.11 when resources - // are renamed or moved between modules and there's no automatic - // way to fix them here. 
In practice it shouldn't hurt to miss - // a few dependency edges in the state because a subsequent plan - // will run a refresh walk first and re-synchronize the - // dependencies with the configuration. - // - // There is one rough edges where this can cause an incorrect - // result, though: If the first command the user runs after - // upgrading to Terraform 0.12 uses -refresh=false and thus - // prevents the dependency reorganization from occurring _and_ - // that initial plan discovered "orphaned" resources (not present - // in configuration any longer) then when the plan is applied the - // destroy ordering will be incorrect for the instances of those - // resources. We expect that is a rare enough situation that it - // isn't a big deal, and even when it _does_ occur it's common for - // the apply to succeed anyway unless many separate resources with - // complex inter-dependencies are all orphaned at once. - log.Printf("statefile: ignoring invalid dependency address %q while upgrading from state version 3 to version 4: %s", v, err) - continue - } - dependencies = append(dependencies, depStr) - } - - return &instanceObjectStateV4{ - IndexKey: instKeyRaw, - Status: status, - Deposed: string(deposedKey), - AttributesFlat: attributes, - DependsOn: dependencies, - SchemaVersion: schemaVersion, - PrivateRaw: privateJSON, - }, nil -} - -// parseLegacyResourceAddress parses the different identifier format used -// state formats before version 4, like "instance.name.0". -func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { - var ret addrs.ResourceInstance - - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return ret, fmt.Errorf("invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - ret.Resource.Mode = addrs.ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - ret.Resource.Mode = addrs.DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { - return ret, fmt.Errorf("invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - ret.Resource.Type = parts[0] - ret.Resource.Name = parts[1] - ret.Key = addrs.NoKey - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return ret, fmt.Errorf("error parsing resource address %q: %s", s, err) - } - - ret.Key = addrs.IntKey(idx) - } - - return ret, nil -} - -// simplifyImpliedValueType attempts to heuristically simplify a value type -// derived from a legacy stored output value into something simpler that -// is closer to what would've fitted into the pre-v0.12 value type system. -func simplifyImpliedValueType(ty cty.Type) cty.Type { - switch { - case ty.IsTupleType(): - // If all of the element types are the same then we'll make this - // a list instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyTuple) { - // Don't know what the element type would be, then. 
- return ty - } - - etys := ty.TupleElementTypes() - ety := etys[0] - for _, other := range etys[1:] { - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.List(ety) - - case ty.IsObjectType(): - // If all of the attribute types are the same then we'll make this - // a map instead. This is very likely to be true, since prior versions - // of Terraform did not officially support mixed-type collections. - - if ty.Equals(cty.EmptyObject) { - // Don't know what the element type would be, then. - return ty - } - - atys := ty.AttributeTypes() - var ety cty.Type - for _, other := range atys { - if ety == cty.NilType { - ety = other - continue - } - if !other.Equals(ety) { - // inconsistent types - return ty - } - } - ety = simplifyImpliedValueType(ety) - return cty.Map(ety) - - default: - // No other normalizations are possible - return ty - } -} - -func parseLegacyDependency(s string) (string, error) { - parts := strings.Split(s, ".") - ret := parts[0] - for _, part := range parts[1:] { - if part == "*" { - break - } - if i, err := strconv.Atoi(part); err == nil { - ret = ret + fmt.Sprintf("[%d]", i) - break - } - ret = ret + "." + part - } - - // The result must parse as a reference, or else we'll create an invalid - // state file. - var diags tfdiags.Diagnostics - _, diags = addrs.ParseRefStr(ret) - if diags.HasErrors() { - return "", diags.Err() - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go deleted file mode 100644 index 2cc0677a..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go +++ /dev/null @@ -1,628 +0,0 @@ -package statefile - -import ( - "encoding/json" - "fmt" - "io" - "sort" - - version "github.com/hashicorp/go-version" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - sV4 := &stateV4{} - err := json.Unmarshal(src, sV4) - if err != nil { - diags = diags.Append(jsonUnmarshalDiags(err)) - return nil, diags - } - - file, prepDiags := prepareStateV4(sV4) - diags = diags.Append(prepDiags) - return file, diags -} - -func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var tfVersion *version.Version - if sV4.TerraformVersion != "" { - var err error - tfVersion, err = version.NewVersion(sV4.TerraformVersion) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid Terraform version string", - fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), - )) - } - } - - file := &File{ - TerraformVersion: tfVersion, - Serial: sV4.Serial, - Lineage: sV4.Lineage, - } - - state := states.NewState() - - for _, rsV4 := range sV4.Resources { - rAddr := addrs.Resource{ - Type: rsV4.Type, - Name: rsV4.Name, - } - switch rsV4.Mode { - case "managed": - rAddr.Mode = addrs.ManagedResourceMode - case "data": - rAddr.Mode = addrs.DataResourceMode - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource mode in state", - fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), - )) - continue - } - - 
moduleAddr := addrs.RootModuleInstance - if rsV4.Module != "" { - var addrDiags tfdiags.Diagnostics - moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) - diags = diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - } - - providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) - diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - - var eachMode states.EachMode - switch rsV4.EachMode { - case "": - eachMode = states.NoEach - case "list": - eachMode = states.EachList - case "map": - eachMode = states.EachMap - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource metadata in state", - fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode), - )) - continue - } - - ms := state.EnsureModule(moduleAddr) - - // Ensure the resource container object is present in the state. - ms.SetResourceMeta(rAddr, eachMode, providerAddr) - - for _, isV4 := range rsV4.Instances { - keyRaw := isV4.IndexKey - var key addrs.InstanceKey - switch tk := keyRaw.(type) { - case int: - key = addrs.IntKey(tk) - case float64: - // Since JSON only has one number type, reading from encoding/json - // gives us a float64 here even if the number is whole. - // float64 has a smaller integer range than int, but in practice - // we rarely have more than a few tens of instances and so - // it's unlikely that we'll exhaust the 52 bits in a float64. - key = addrs.IntKey(int(tk)) - case string: - key = addrs.StringKey(tk) - default: - if keyRaw != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), - )) - continue - } - key = addrs.NoKey - } - - instAddr := rAddr.Instance(key) - - obj := &states.ResourceInstanceObjectSrc{ - SchemaVersion: isV4.SchemaVersion, - } - - { - // Instance attributes - switch { - case isV4.AttributesRaw != nil: - obj.AttrsJSON = isV4.AttributesRaw - case isV4.AttributesFlat != nil: - obj.AttrsFlat = isV4.AttributesFlat - default: - // This is odd, but we'll accept it and just treat the - // object has being empty. In practice this should arise - // only from the contrived sort of state objects we tend - // to hand-write inline in tests. - obj.AttrsJSON = []byte{'{', '}'} - } - } - - { - // Status - raw := isV4.Status - switch raw { - case "": - obj.Status = states.ObjectReady - case "tainted": - obj.Status = states.ObjectTainted - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), - )) - continue - } - } - - if raw := isV4.PrivateRaw; len(raw) > 0 { - obj.Private = raw - } - - { - // Allow both the deprecated `depends_on` and new - // `dependencies` to coexist for now so resources can be - // upgraded as they are refreshed. 
- depsRaw := isV4.DependsOn - deps := make([]addrs.Referenceable, 0, len(depsRaw)) - for _, depRaw := range depsRaw { - ref, refDiags := addrs.ParseRefStr(depRaw) - diags = diags.Append(refDiags) - if refDiags.HasErrors() { - continue - } - if len(ref.Remaining) != 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw), - )) - } - if ref.Subject == nil { - // Should never happen - panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr))) - } - deps = append(deps, ref.Subject) - } - obj.DependsOn = deps - } - - { - depsRaw := isV4.Dependencies - deps := make([]addrs.AbsResource, 0, len(depsRaw)) - for _, depRaw := range depsRaw { - addr, addrDiags := addrs.ParseAbsResourceStr(depRaw) - diags = diags.Append(addrDiags) - if addrDiags.HasErrors() { - continue - } - deps = append(deps, addr) - } - obj.Dependencies = deps - } - - switch { - case isV4.Deposed != "": - dk := states.DeposedKey(isV4.Deposed) - if len(dk) != 8 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource instance metadata in state", - fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), - )) - continue - } - is := ms.ResourceInstance(instAddr) - if is.HasDeposed(dk) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), - )) - continue - } - - ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) - default: - is := ms.ResourceInstance(instAddr) - if is.HasCurrent() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Duplicate resource instance in state", - fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), - )) - continue - } - - ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) - } - } - - // We repeat this after creating the instances because - // SetResourceInstanceCurrent automatically resets this metadata based - // on the incoming objects. That behavior is useful when we're making - // piecemeal updates to the state during an apply, but when we're - // reading the state file we want to reflect its contents exactly. - ms.SetResourceMeta(rAddr, eachMode, providerAddr) - } - - // The root module is special in that we persist its attributes and thus - // need to reload them now. (For descendent modules we just re-calculate - // them based on the latest configuration on each run.) 
- { - rootModule := state.RootModule() - for name, fos := range sV4.RootOutputs { - os := &states.OutputValue{} - os.Sensitive = fos.Sensitive - - ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output value type in state", - fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), - )) - continue - } - - val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid output value saved in state", - fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), - )) - continue - } - - os.Value = val - rootModule.OutputValues[name] = os - } - } - - file.State = state - return file, diags -} - -func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics { - // Here we'll convert back from the "File" representation to our - // stateV4 struct representation and write that. - // - // While we support legacy state formats for reading, we only support the - // latest for writing and so if a V5 is added in future then this function - // should be deleted and replaced with a writeStateV5, even though the - // read/prepare V4 functions above would stick around. - - var diags tfdiags.Diagnostics - if file == nil || file.State == nil { - panic("attempt to write nil state to file") - } - - var terraformVersion string - if file.TerraformVersion != nil { - terraformVersion = file.TerraformVersion.String() - } - - sV4 := &stateV4{ - TerraformVersion: terraformVersion, - Serial: file.Serial, - Lineage: file.Lineage, - RootOutputs: map[string]outputStateV4{}, - Resources: []resourceStateV4{}, - } - - for name, os := range file.State.RootModule().OutputValues { - src, err := ctyjson.Marshal(os.Value, os.Value.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize output value in state", - fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err), - )) - continue - } - - typeSrc, err := ctyjson.MarshalType(os.Value.Type()) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize output value in state", - fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err), - )) - continue - } - - sV4.RootOutputs[name] = outputStateV4{ - Sensitive: os.Sensitive, - ValueRaw: json.RawMessage(src), - ValueTypeRaw: json.RawMessage(typeSrc), - } - } - - for _, ms := range file.State.Modules { - moduleAddr := ms.Addr - for _, rs := range ms.Resources { - resourceAddr := rs.Addr - - var mode string - switch resourceAddr.Mode { - case addrs.ManagedResourceMode: - mode = "managed" - case addrs.DataResourceMode: - mode = "data" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource in state", - fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), - )) - continue - } - - var eachMode string - switch rs.EachMode { - case states.NoEach: - eachMode = "" - case states.EachList: - eachMode = "list" - case states.EachMap: - eachMode = "map" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource in state", - fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode), - )) - continue - } - - sV4.Resources 
= append(sV4.Resources, resourceStateV4{ - Module: moduleAddr.String(), - Mode: mode, - Type: resourceAddr.Type, - Name: resourceAddr.Name, - EachMode: eachMode, - ProviderConfig: rs.ProviderConfig.String(), - Instances: []instanceObjectStateV4{}, - }) - rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) - - for key, is := range rs.Instances { - if is.HasCurrent() { - var objDiags tfdiags.Diagnostics - rsV4.Instances, objDiags = appendInstanceObjectStateV4( - rs, is, key, is.Current, states.NotDeposed, - rsV4.Instances, - ) - diags = diags.Append(objDiags) - } - for dk, obj := range is.Deposed { - var objDiags tfdiags.Diagnostics - rsV4.Instances, objDiags = appendInstanceObjectStateV4( - rs, is, key, obj, dk, - rsV4.Instances, - ) - diags = diags.Append(objDiags) - } - } - } - } - - sV4.normalize() - - src, err := json.MarshalIndent(sV4, "", " ") - if err != nil { - // Shouldn't happen if we do our conversion to *stateV4 correctly above. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize state", - fmt.Sprintf("An error occured while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err), - )) - return diags - } - src = append(src, '\n') - - _, err = w.Write(src) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to write state", - fmt.Sprintf("An error occured while writing the serialized state: %s.", err), - )) - return diags - } - - return diags -} - -func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - var status string - switch obj.Status { - case states.ObjectReady: - status = "" - case states.ObjectTainted: - status = "tainted" - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource instance in state", - fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), - )) - } - - var privateRaw []byte - if len(obj.Private) > 0 { - privateRaw = obj.Private - } - - deps := make([]string, len(obj.Dependencies)) - for i, depAddr := range obj.Dependencies { - deps[i] = depAddr.String() - } - - depOn := make([]string, len(obj.DependsOn)) - for i, depAddr := range obj.DependsOn { - depOn[i] = depAddr.String() - } - - var rawKey interface{} - switch tk := key.(type) { - case addrs.IntKey: - rawKey = int(tk) - case addrs.StringKey: - rawKey = string(tk) - default: - if key != addrs.NoKey { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to serialize resource instance in state", - fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), - )) - } - } - - return append(isV4s, instanceObjectStateV4{ - IndexKey: rawKey, - Deposed: string(deposed), - Status: status, - SchemaVersion: obj.SchemaVersion, - AttributesFlat: obj.AttrsFlat, - AttributesRaw: obj.AttrsJSON, - PrivateRaw: privateRaw, - Dependencies: deps, - DependsOn: depOn, - }), diags -} - -type stateV4 struct { - Version stateVersionV4 `json:"version"` - TerraformVersion string `json:"terraform_version"` - Serial uint64 `json:"serial"` - Lineage string `json:"lineage"` - RootOutputs map[string]outputStateV4 `json:"outputs"` - Resources []resourceStateV4 `json:"resources"` -} - -// normalize makes some in-place changes to normalize the way items are -// 
stored to ensure that two functionally-equivalent states will be stored -// identically. -func (s *stateV4) normalize() { - sort.Stable(sortResourcesV4(s.Resources)) - for _, rs := range s.Resources { - sort.Stable(sortInstancesV4(rs.Instances)) - } -} - -type outputStateV4 struct { - ValueRaw json.RawMessage `json:"value"` - ValueTypeRaw json.RawMessage `json:"type"` - Sensitive bool `json:"sensitive,omitempty"` -} - -type resourceStateV4 struct { - Module string `json:"module,omitempty"` - Mode string `json:"mode"` - Type string `json:"type"` - Name string `json:"name"` - EachMode string `json:"each,omitempty"` - ProviderConfig string `json:"provider"` - Instances []instanceObjectStateV4 `json:"instances"` -} - -type instanceObjectStateV4 struct { - IndexKey interface{} `json:"index_key,omitempty"` - Status string `json:"status,omitempty"` - Deposed string `json:"deposed,omitempty"` - - SchemaVersion uint64 `json:"schema_version"` - AttributesRaw json.RawMessage `json:"attributes,omitempty"` - AttributesFlat map[string]string `json:"attributes_flat,omitempty"` - - PrivateRaw []byte `json:"private,omitempty"` - - Dependencies []string `json:"dependencies,omitempty"` - DependsOn []string `json:"depends_on,omitempty"` -} - -// stateVersionV4 is a weird special type we use to produce our hard-coded -// "version": 4 in the JSON serialization. -type stateVersionV4 struct{} - -func (sv stateVersionV4) MarshalJSON() ([]byte, error) { - return []byte{'4'}, nil -} - -func (sv stateVersionV4) UnmarshalJSON([]byte) error { - // Nothing to do: we already know we're version 4 - return nil -} - -type sortResourcesV4 []resourceStateV4 - -func (sr sortResourcesV4) Len() int { return len(sr) } -func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } -func (sr sortResourcesV4) Less(i, j int) bool { - switch { - case sr[i].Mode != sr[j].Mode: - return sr[i].Mode < sr[j].Mode - case sr[i].Type != sr[j].Type: - return sr[i].Type < sr[j].Type - case sr[i].Name != sr[j].Name: - return sr[i].Name < sr[j].Name - default: - return false - } -} - -type sortInstancesV4 []instanceObjectStateV4 - -func (si sortInstancesV4) Len() int { return len(si) } -func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } -func (si sortInstancesV4) Less(i, j int) bool { - ki := si[i].IndexKey - kj := si[j].IndexKey - if ki != kj { - if (ki == nil) != (kj == nil) { - return ki == nil - } - if kii, isInt := ki.(int); isInt { - if kji, isInt := kj.(int); isInt { - return kii < kji - } - return true - } - if kis, isStr := ki.(string); isStr { - if kjs, isStr := kj.(string); isStr { - return kis < kjs - } - return true - } - } - if si[i].Deposed != si[j].Deposed { - return si[i].Deposed < si[j].Deposed - } - return false -} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/write.go b/vendor/github.com/hashicorp/terraform/states/statefile/write.go deleted file mode 100644 index 548ba8a8..00000000 --- a/vendor/github.com/hashicorp/terraform/states/statefile/write.go +++ /dev/null @@ -1,17 +0,0 @@ -package statefile - -import ( - "io" - - tfversion "github.com/hashicorp/terraform/version" -) - -// Write writes the given state to the given writer in the current state -// serialization format. -func Write(s *File, w io.Writer) error { - // Always record the current terraform version in the state. 
- s.TerraformVersion = tfversion.SemVer - - diags := writeStateV4(s, w) - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform/states/sync.go b/vendor/github.com/hashicorp/terraform/states/sync.go deleted file mode 100644 index 47fb16d6..00000000 --- a/vendor/github.com/hashicorp/terraform/states/sync.go +++ /dev/null @@ -1,537 +0,0 @@ -package states - -import ( - "log" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// SyncState is a wrapper around State that provides concurrency-safe access to -// various common operations that occur during a Terraform graph walk, or other -// similar concurrent contexts. -// -// When a SyncState wrapper is in use, no concurrent direct access to the -// underlying objects is permitted unless the caller first acquires an explicit -// lock, using the Lock and Unlock methods. Most callers should _not_ -// explicitly lock, and should instead use the other methods of this type that -// handle locking automatically. -// -// Since SyncState is able to safely consolidate multiple updates into a single -// atomic operation, many of its methods are at a higher level than those -// of the underlying types, and operate on the state as a whole rather than -// on individual sub-structures of the state. -// -// SyncState can only protect against races within its own methods. It cannot -// provide any guarantees about the order in which concurrent operations will -// be processed, so callers may still need to employ higher-level techniques -// for ensuring correct operation sequencing, such as building and walking -// a dependency graph. -type SyncState struct { - state *State - lock sync.RWMutex -} - -// Module returns a snapshot of the state of the module instance with the given -// address, or nil if no such module is tracked. -// -// The return value is a pointer to a copy of the module state, which the -// caller may then freely access and mutate. However, since the module state -// tends to be a large data structure with many child objects, where possible -// callers should prefer to use a more granular accessor to access a child -// module directly, and thus reduce the amount of copying required. -func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { - s.lock.RLock() - ret := s.state.Module(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// RemoveModule removes the entire state for the given module, taking with -// it any resources associated with the module. This should generally be -// called only for modules whose resources have all been destroyed, but -// that is not enforced by this method. -func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) { - s.lock.Lock() - defer s.lock.Unlock() - - s.state.RemoveModule(addr) -} - -// OutputValue returns a snapshot of the state of the output value with the -// given address, or nil if no such output value is tracked. -// -// The return value is a pointer to a copy of the output value state, which the -// caller may then freely access and mutate. -func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { - s.lock.RLock() - ret := s.state.OutputValue(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// SetOutputValue writes a given output value into the state, overwriting -// any existing value of the same name. -// -// If the module containing the output is not yet tracked in state then it -// be added as a side-effect. 
-func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) -} - -// RemoveOutputValue removes the stored value for the output value with the -// given address. -// -// If this results in its containing module being empty, the module will be -// pruned from the state as a side-effect. -func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.RemoveOutputValue(addr.OutputValue.Name) - s.maybePruneModule(addr.Module) -} - -// LocalValue returns the current value associated with the given local value -// address. -func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { - s.lock.RLock() - // cty.Value is immutable, so we don't need any extra copying here. - ret := s.state.LocalValue(addr) - s.lock.RUnlock() - return ret -} - -// SetLocalValue writes a given output value into the state, overwriting -// any existing value of the same name. -// -// If the module containing the local value is not yet tracked in state then it -// will be added as a side-effect. -func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetLocalValue(addr.LocalValue.Name, value) -} - -// RemoveLocalValue removes the stored value for the local value with the -// given address. -// -// If this results in its containing module being empty, the module will be -// pruned from the state as a side-effect. -func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.RemoveLocalValue(addr.LocalValue.Name) - s.maybePruneModule(addr.Module) -} - -// Resource returns a snapshot of the state of the resource with the given -// address, or nil if no such resource is tracked. -// -// The return value is a pointer to a copy of the resource state, which the -// caller may then freely access and mutate. -func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { - s.lock.RLock() - ret := s.state.Resource(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ResourceInstance returns a snapshot of the state the resource instance with -// the given address, or nil if no such instance is tracked. -// -// The return value is a pointer to a copy of the instance state, which the -// caller may then freely access and mutate. -func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { - s.lock.RLock() - ret := s.state.ResourceInstance(addr).DeepCopy() - s.lock.RUnlock() - return ret -} - -// ResourceInstanceObject returns a snapshot of the current instance object -// of the given generation belonging to the instance with the given address, -// or nil if no such object is tracked.. -// -// The return value is a pointer to a copy of the object, which the caller may -// then freely access and mutate. 
-func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { - s.lock.RLock() - defer s.lock.RUnlock() - - inst := s.state.ResourceInstance(addr) - if inst == nil { - return nil - } - return inst.GetGeneration(gen).DeepCopy() -} - -// SetResourceMeta updates the resource-level metadata for the resource at -// the given address, creating the containing module state and resource state -// as a side-effect if not already present. -func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceMeta(addr.Resource, eachMode, provider) -} - -// RemoveResource removes the entire state for the given resource, taking with -// it any instances associated with the resource. This should generally be -// called only for resource objects whose instances have all been destroyed, -// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead -// to safely check first.) -func (s *SyncState) RemoveResource(addr addrs.AbsResource) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.RemoveResource(addr.Resource) - s.maybePruneModule(addr.Module) -} - -// RemoveResourceIfEmpty is similar to RemoveResource but first checks to -// make sure there are no instances or objects left in the resource. -// -// Returns true if the resource was removed, or false if remaining child -// objects prevented its removal. Returns true also if the resource was -// already absent, and thus no action needed to be taken. -func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return true // nothing to do - } - rs := ms.Resource(addr.Resource) - if rs == nil { - return true // nothing to do - } - if len(rs.Instances) != 0 { - // We don't check here for the possibility of instances that exist - // but don't have any objects because it's the responsibility of the - // instance-mutation methods to prune those away automatically. - return false - } - ms.RemoveResource(addr.Resource) - s.maybePruneModule(addr.Module) - return true -} - -// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a -// resource has changed from having "count" set to not set, or vice-versa, and -// so we need to rename the zeroth instance key to no key at all, or vice-versa. -// -// Set countEnabled to true if the resource has count set in its new -// configuration, or false if it does not. -// -// The state is modified in-place if necessary, moving a resource instance -// between the two addresses. The return value is true if a change was made, -// and false otherwise. -func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return false - } - - relAddr := addr.Resource - rs := ms.Resource(relAddr) - if rs == nil { - return false - } - huntKey := addrs.NoKey - replaceKey := addrs.InstanceKey(addrs.IntKey(0)) - if !countEnabled { - huntKey, replaceKey = replaceKey, huntKey - } - - is, exists := rs.Instances[huntKey] - if !exists { - return false - } - - if _, exists := rs.Instances[replaceKey]; exists { - // If the replacement key also exists then we'll do nothing and keep both. 
- return false - } - - // If we get here then we need to "rename" from hunt to replace - rs.Instances[replaceKey] = is - delete(rs.Instances, huntKey) - return true -} - -// SetResourceInstanceCurrent saves the given instance object as the current -// generation of the resource instance with the given address, simultaneously -// updating the recorded provider configuration address, dependencies, and -// resource EachMode. -// -// Any existing current instance object for the given resource is overwritten. -// Set obj to nil to remove the primary generation object altogether. If there -// are no deposed objects then the instance as a whole will be removed, which -// may in turn also remove the containing module if it becomes empty. -// -// The caller must ensure that the given ResourceInstanceObject is not -// concurrently mutated during this call, but may be freely used again once -// this function returns. -// -// The provider address and "each mode" are resource-wide settings and so they -// are updated for all other instances of the same resource as a side-effect of -// this call. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// SetResourceInstanceDeposed saves the given instance object as a deposed -// generation of the resource instance with the given address and deposed key. -// -// Call this method only for pre-existing deposed objects that already have -// a known DeposedKey. For example, this method is useful if reloading objects -// that were persisted to a state file. To mark the current object as deposed, -// use DeposeResourceInstanceObject instead. -// -// The caller must ensure that the given ResourceInstanceObject is not -// concurrently mutated during this call, but may be freely used again once -// this function returns. -// -// The resource that contains the given instance must already exist in the -// state, or this method will panic. Use Resource to check first if its -// presence is not already guaranteed. -// -// Any existing current instance object for the given resource and deposed key -// is overwritten. Set obj to nil to remove the deposed object altogether. If -// the instance is left with no objects after this operation then it will -// be removed from its containing resource altogether. -// -// If the containing module for this resource or the resource itself are not -// already tracked in state then they will be added as a side-effect. -func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.EnsureModule(addr.Module) - ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) - s.maybePruneModule(addr.Module) -} - -// DeposeResourceInstanceObject moves the current instance object for the -// given resource instance address into the deposed set, leaving the instance -// without a current object. -// -// The return value is the newly-allocated deposed key, or NotDeposed if the -// given instance is already lacking a current object. 
-// -// If the containing module for this resource or the resource itself are not -// already tracked in state then there cannot be a current object for the -// given instance, and so NotDeposed will be returned without modifying the -// state at all. -func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return NotDeposed - } - - return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) -} - -// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject -// but uses a pre-allocated key. It's the caller's responsibility to ensure -// that there aren't any races to use a particular key; this method will panic -// if the given key is already in use. -func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { - s.lock.Lock() - defer s.lock.Unlock() - - if forcedKey == NotDeposed { - // Usage error: should use DeposeResourceInstanceObject in this case - panic("DeposeResourceInstanceObjectForceKey called without forced key") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - return // Nothing to do, since there can't be any current object either. - } - - ms.deposeResourceInstanceObject(addr.Resource, forcedKey) -} - -// ForgetResourceInstanceAll removes the record of all objects associated with -// the specified resource instance, if present. If not present, this is a no-op. -func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.ForgetResourceInstanceAll(addr.Resource) - s.maybePruneModule(addr.Module) -} - -// ForgetResourceInstanceDeposed removes the record of the deposed object with -// the given address and key, if present. If not present, this is a no-op. -func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) { - s.lock.Lock() - defer s.lock.Unlock() - - ms := s.state.Module(addr.Module) - if ms == nil { - return - } - ms.ForgetResourceInstanceDeposed(addr.Resource, key) - s.maybePruneModule(addr.Module) -} - -// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the -// given key on the specified resource as the current object for that instance -// if and only if that would not cause us to forget an existing current -// object for that instance. -// -// Returns true if the object was restored to current, or false if no change -// was made at all. -func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool { - s.lock.Lock() - defer s.lock.Unlock() - - if key == NotDeposed { - panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey") - } - - ms := s.state.Module(addr.Module) - if ms == nil { - // Nothing to do, since the specified deposed object cannot exist. - return false - } - - return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key) -} - -// RemovePlannedResourceInstanceObjects removes from the state any resource -// instance objects that have the status ObjectPlanned, indiciating that they -// are just transient placeholders created during planning. -// -// Note that this does not restore any "ready" or "tainted" object that might -// have been present before the planned object was written. 
The only real use -// for this method is in preparing the state created during a refresh walk, -// where we run the planning step for certain instances just to create enough -// information to allow correct expression evaluation within provider and -// data resource blocks. Discarding planned instances in that case is okay -// because the refresh phase only creates planned objects to stand in for -// objects that don't exist yet, and thus the planned object must have been -// absent before by definition. -func (s *SyncState) RemovePlannedResourceInstanceObjects() { - // TODO: Merge together the refresh and plan phases into a single walk, - // so we can remove the need to create this "partial plan" during refresh - // that we then need to clean up before proceeding. - - s.lock.Lock() - defer s.lock.Unlock() - - for _, ms := range s.state.Modules { - moduleAddr := ms.Addr - - for _, rs := range ms.Resources { - resAddr := rs.Addr - - for ik, is := range rs.Instances { - instAddr := resAddr.Instance(ik) - - if is.Current != nil && is.Current.Status == ObjectPlanned { - // Setting the current instance to nil removes it from the - // state altogether if there are not also deposed instances. - ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) - } - - for dk, obj := range is.Deposed { - // Deposed objects should never be "planned", but we'll - // do this anyway for the sake of completeness. - if obj.Status == ObjectPlanned { - ms.ForgetResourceInstanceDeposed(instAddr, dk) - } - } - } - } - - // We may have deleted some objects, which means that we may have - // left a module empty, and so we must prune to preserve the invariant - // that only the root module is allowed to be empty. - s.maybePruneModule(moduleAddr) - } -} - -// Lock acquires an explicit lock on the state, allowing direct read and write -// access to the returned state object. The caller must call Unlock once -// access is no longer needed, and then immediately discard the state pointer -// pointer. -// -// Most callers should not use this. Instead, use the concurrency-safe -// accessors and mutators provided directly on SyncState. -func (s *SyncState) Lock() *State { - s.lock.Lock() - return s.state -} - -// Unlock releases a lock previously acquired by Lock, at which point the -// caller must cease all use of the state pointer that was returned. -// -// Do not call this method except to end an explicit lock acquired by -// Lock. If a caller calls Unlock without first holding the lock, behavior -// is undefined. -func (s *SyncState) Unlock() { - s.lock.Unlock() -} - -// maybePruneModule will remove a module from the state altogether if it is -// empty, unless it's the root module which must always be present. -// -// This helper method is not concurrency-safe on its own, so must only be -// called while the caller is already holding the lock for writing. -func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { - if addr.IsRoot() { - // We never prune the root. 
- return - } - - ms := s.state.Module(addr) - if ms == nil { - return - } - - if ms.empty() { - log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) - s.state.RemoveModule(addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go deleted file mode 100644 index 5637083c..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ /dev/null @@ -1,890 +0,0 @@ -package terraform - -import ( - "bytes" - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/states/statefile" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// InputMode defines what sort of input will be asked for when Input -// is called on Context. -type InputMode byte - -const ( - // InputModeProvider asks for provider variables - InputModeProvider InputMode = 1 << iota - - // InputModeStd is the standard operating mode and asks for both variables - // and providers. - InputModeStd = InputModeProvider -) - -var ( - // contextFailOnShadowError will cause Context operations to return - // errors when shadow operations fail. This is only used for testing. - contextFailOnShadowError = false - - // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every - // Plan operation, effectively testing the Diff DeepCopy whenever - // a Plan occurs. This is enabled for tests. - contextTestDeepCopyOnPlan = false -) - -// ContextOpts are the user-configurable options to create a context with -// NewContext. -type ContextOpts struct { - Config *configs.Config - Changes *plans.Changes - State *states.State - Targets []addrs.Targetable - Variables InputValues - Meta *ContextMeta - Destroy bool - - Hooks []Hook - Parallelism int - ProviderResolver providers.Resolver - Provisioners map[string]ProvisionerFactory - - // If non-nil, will apply as additional constraints on the provider - // plugins that will be requested from the provider resolver. - ProviderSHA256s map[string][]byte - SkipProviderVerify bool - - UIInput UIInput -} - -// ContextMeta is metadata about the running context. This is information -// that this package or structure cannot determine on its own but exposes -// into Terraform in various ways. This must be provided by the Context -// initializer. -type ContextMeta struct { - Env string // Env is the state environment -} - -// Context represents all the context that Terraform needs in order to -// perform operations on infrastructure. This structure is built using -// NewContext. -type Context struct { - config *configs.Config - changes *plans.Changes - state *states.State - targets []addrs.Targetable - variables InputValues - meta *ContextMeta - destroy bool - - hooks []Hook - components contextComponentFactory - schemas *Schemas - sh *stopHook - uiInput UIInput - - l sync.Mutex // Lock acquired during any task - parallelSem Semaphore - providerInputConfig map[string]map[string]cty.Value - providerSHA256s map[string][]byte - runLock sync.Mutex - runCond *sync.Cond - runContext context.Context - runContextCancel context.CancelFunc - shadowErr error -} - -// (additional methods on Context can be found in context_*.go files.) 
- -// NewContext creates a new Context structure. -// -// Once a Context is created, the caller must not access or mutate any of -// the objects referenced (directly or indirectly) by the ContextOpts fields. -// -// If the returned diagnostics contains errors then the resulting context is -// invalid and must not be used. -func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { - log.Printf("[TRACE] terraform.NewContext: starting") - diags := CheckCoreVersionRequirements(opts.Config) - // If version constraints are not met then we'll bail early since otherwise - // we're likely to just see a bunch of other errors related to - // incompatibilities, which could be overwhelming for the user. - if diags.HasErrors() { - return nil, diags - } - - // Copy all the hooks and add our stop hook. We don't append directly - // to the Config so that we're not modifying that in-place. - sh := new(stopHook) - hooks := make([]Hook, len(opts.Hooks)+1) - copy(hooks, opts.Hooks) - hooks[len(opts.Hooks)] = sh - - state := opts.State - if state == nil { - state = states.NewState() - } - - // Determine parallelism, default to 10. We do this both to limit - // CPU pressure but also to have an extra guard against rate throttling - // from providers. - par := opts.Parallelism - if par == 0 { - par = 10 - } - - // Set up the variables in the following sequence: - // 0 - Take default values from the configuration - // 1 - Take values from TF_VAR_x environment variables - // 2 - Take values specified in -var flags, overriding values - // set by environment variables if necessary. This includes - // values taken from -var-file in addition. - var variables InputValues - if opts.Config != nil { - // Default variables from the configuration seed our map. - variables = DefaultVariableValues(opts.Config.Module.Variables) - } - // Variables provided by the caller (from CLI, environment, etc) can - // override the defaults. - variables = variables.Override(opts.Variables) - - // Bind available provider plugins to the constraints in config - var providerFactories map[addrs.Provider]providers.Factory - if opts.ProviderResolver != nil { - deps := ConfigTreeDependencies(opts.Config, state) - reqd := deps.AllPluginRequirements() - if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { - reqd.LockExecutables(opts.ProviderSHA256s) - } - log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") - var providerDiags tfdiags.Diagnostics - providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) - diags = diags.Append(providerDiags) - - if diags.HasErrors() { - return nil, diags - } - } else { - providerFactories = make(map[addrs.Provider]providers.Factory) - } - - components := &basicComponentFactory{ - providers: providerFactories, - provisioners: opts.Provisioners, - } - - log.Printf("[TRACE] terraform.NewContext: loading provider schemas") - schemas, err := LoadSchemas(opts.Config, opts.State, components) - if err != nil { - diags = diags.Append(err) - return nil, diags - } - - changes := opts.Changes - if changes == nil { - changes = plans.NewChanges() - } - - config := opts.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - log.Printf("[TRACE] terraform.NewContext: complete") - - // By the time we get here, we should have values defined for all of - // the root module variables, even if some of them are "unknown". 
It's the - // caller's responsibility to have already handled the decoding of these - // from the various ways the CLI allows them to be set and to produce - // user-friendly error messages if they are not all present, and so - // the error message from checkInputVariables should never be seen and - // includes language asking the user to report a bug. - if config != nil { - varDiags := checkInputVariables(config.Module.Variables, variables) - diags = diags.Append(varDiags) - } - - return &Context{ - components: components, - schemas: schemas, - destroy: opts.Destroy, - changes: changes, - hooks: hooks, - meta: opts.Meta, - config: config, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, - - parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]cty.Value), - providerSHA256s: opts.ProviderSHA256s, - sh: sh, - }, diags -} - -func (c *Context) Schemas() *Schemas { - return c.schemas -} - -type ContextGraphOpts struct { - // If true, validates the graph structure (checks for cycles). - Validate bool - - // Legacy graphs only: won't prune the graph - Verbose bool -} - -// Graph returns the graph used for the given operation type. -// -// The most extensive or complex graph type is GraphTypePlan. -func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) { - if opts == nil { - opts = &ContextGraphOpts{Validate: true} - } - - log.Printf("[INFO] terraform: building graph: %s", typ) - switch typ { - case GraphTypeApply: - return (&ApplyGraphBuilder{ - Config: c.config, - Changes: c.changes, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Destroy: c.destroy, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeValidate: - // The validate graph is just a slightly modified plan graph - fallthrough - case GraphTypePlan: - // Create the plan graph builder - p := &PlanGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - } - - // Some special cases for other graph types shared with plan currently - var b GraphBuilder = p - switch typ { - case GraphTypeValidate: - b = ValidateGraphBuilder(p) - } - - return b.Build(addrs.RootModuleInstance) - - case GraphTypePlanDestroy: - return (&DestroyPlanGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeRefresh: - return (&RefreshGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - Targets: c.targets, - Validate: opts.Validate, - }).Build(addrs.RootModuleInstance) - - case GraphTypeEval: - return (&EvalGraphBuilder{ - Config: c.config, - State: c.state, - Components: c.components, - Schemas: c.schemas, - }).Build(addrs.RootModuleInstance) - - default: - // Should never happen, because the above is exhaustive for all graph types. - panic(fmt.Errorf("unsupported graph type %s", typ)) - } -} - -// ShadowError returns any errors caught during a shadow operation. -// -// A shadow operation is an operation run in parallel to a real operation -// that performs the same tasks using new logic on copied state. The results -// are compared to ensure that the new logic works the same as the old logic. -// The shadow never affects the real operation or return values. 
-// -// The result of the shadow operation are only available through this function -// call after a real operation is complete. -// -// For API consumers of Context, you can safely ignore this function -// completely if you have no interest in helping report experimental feature -// errors to Terraform maintainers. Otherwise, please call this function -// after every operation and report this to the user. -// -// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect -// the real state or result of a real operation. They are purely informational -// to assist in future Terraform versions being more stable. Please message -// this effectively to the end user. -// -// This must be called only when no other operation is running (refresh, -// plan, etc.). The result can be used in parallel to any other operation -// running. -func (c *Context) ShadowError() error { - return c.shadowErr -} - -// State returns a copy of the current state associated with this context. -// -// This cannot safely be called in parallel with any other Context function. -func (c *Context) State() *states.State { - return c.state.DeepCopy() -} - -// Eval produces a scope in which expressions can be evaluated for -// the given module path. -// -// This method must first evaluate any ephemeral values (input variables, local -// values, and output values) in the configuration. These ephemeral values are -// not included in the persisted state, so they must be re-computed using other -// values in the state before they can be properly evaluated. The updated -// values are retained in the main state associated with the receiving context. -// -// This function takes no action against remote APIs but it does need access -// to all provider and provisioner instances in order to obtain their schemas -// for type checking. -// -// The result is an evaluation scope that can be used to resolve references -// against the root module. If the returned diagnostics contains errors then -// the returned scope may be nil. If it is not nil then it may still be used -// to attempt expression evaluation or other analysis, but some expressions -// may not behave as expected. -func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { - // This is intended for external callers such as the "terraform console" - // command. Internally, we create an evaluator in c.walk before walking - // the graph, and create scopes in ContextGraphWalker. - - var diags tfdiags.Diagnostics - defer c.acquireRun("eval")() - - // Start with a copy of state so that we don't affect any instances - // that other methods may have already returned. - c.state = c.state.DeepCopy() - var walker *ContextGraphWalker - - graph, graphDiags := c.Graph(GraphTypeEval, nil) - diags = diags.Append(graphDiags) - if !diags.HasErrors() { - var walkDiags tfdiags.Diagnostics - walker, walkDiags = c.walk(graph, walkEval) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - } - - if walker == nil { - // If we skipped walking the graph (due to errors) then we'll just - // use a placeholder graph walker here, which'll refer to the - // unmodified state. - walker = c.graphWalker(walkEval) - } - - // This is a bit weird since we don't normally evaluate outside of - // the context of a walk, but we'll "re-enter" our desired path here - // just to get hold of an EvalContext for it. 
GraphContextBuiltin - // caches its contexts, so we should get hold of the context that was - // previously used for evaluation here, unless we skipped walking. - evalCtx := walker.EnterPath(path) - return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags -} - -// Apply applies the changes represented by this context and returns -// the resulting state. -// -// Even in the case an error is returned, the state may be returned and will -// potentially be partially updated. In addition to returning the resulting -// state, this context is updated with the latest state. -// -// If the state is required after an error, the caller should call -// Context.State, rather than rely on the return value. -// -// TODO: Apply and Refresh should either always return a state, or rely on the -// State() method. Currently the helper/resource testing framework relies -// on the absence of a returned state to determine if Destroy can be -// called, so that will need to be refactored before this can be changed. -func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("apply")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Build the graph. - graph, diags := c.Graph(GraphTypeApply, nil) - if diags.HasErrors() { - return nil, diags - } - - // Determine the operation - operation := walkApply - if c.destroy { - operation = walkDestroy - } - - // Walk the graph - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - - if c.destroy && !diags.HasErrors() { - // If we know we were trying to destroy objects anyway, and we - // completed without any errors, then we'll also prune out any - // leftover empty resource husks (left after all of the instances - // of a resource with "count" or "for_each" are destroyed) to - // help ensure we end up with an _actually_ empty state, assuming - // we weren't destroying with -target here. - // - // (This doesn't actually take into account -target, but that should - // be okay because it doesn't throw away anything we can't recompute - // on a subsequent "terraform plan" run, if the resources are still - // present in the configuration. However, this _will_ cause "count = 0" - // resources to read as unknown during the next refresh walk, which - // may cause some additional churn if used in a data resource or - // provider block, until we remove refreshing as a separate walk and - // just do it as part of the plan walk.) - c.state.PruneResourceHusks() - } - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Applied changes may be incomplete", - `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: - terraform plan - -Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - return c.state, diags -} - -// Plan generates an execution plan for the given context. -// -// The execution plan encapsulates the context and can be stored -// in order to reinstantiate a context later for Apply. -// -// Plan also updates the diff of this context to be the diff generated -// by the plan, so Apply can be called after. 
-func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { - defer c.acquireRun("plan")() - c.changes = plans.NewChanges() - - var diags tfdiags.Diagnostics - - if len(c.targets) > 0 { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Resource targeting is in effect", - `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. - -The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, - )) - } - - varVals := make(map[string]plans.DynamicValue, len(c.variables)) - for k, iv := range c.variables { - // We use cty.DynamicPseudoType here so that we'll save both the - // value _and_ its dynamic type in the plan, so we can recover - // exactly the same value later. - dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) - if err != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to prepare variable value for plan", - fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), - )) - continue - } - varVals[k] = dv - } - - p := &plans.Plan{ - VariableValues: varVals, - TargetAddrs: c.targets, - ProviderSHA256s: c.providerSHA256s, - } - - var operation walkOperation - if c.destroy { - operation = walkPlanDestroy - } else { - // Set our state to be something temporary. We do this so that - // the plan can update a fake state so that variables work, then - // we replace it back with our old state. - old := c.state - if old == nil { - c.state = states.NewState() - } else { - c.state = old.DeepCopy() - } - defer func() { - c.state = old - }() - - operation = walkPlan - } - - // Build the graph. - graphType := GraphTypePlan - if c.destroy { - graphType = GraphTypePlanDestroy - } - graph, graphDiags := c.Graph(graphType, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return nil, diags - } - - // Do the walk - walker, walkDiags := c.walk(graph, operation) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - p.Changes = c.changes - - return p, diags -} - -// Refresh goes through all the resources in the state and refreshes them -// to their latest state. This will update the state that this context -// works with, along with returning it. -// -// Even in the case an error is returned, the state may be returned and -// will potentially be partially updated. -func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { - defer c.acquireRun("refresh")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // Refresh builds a partial changeset as part of its work because it must - // create placeholder stubs for any resource instances that'll be created - // in subsequent plan so that provider configurations and data resources - // can interpolate from them. This plan is always thrown away after - // the operation completes, restoring any existing changeset. - oldChanges := c.changes - defer func() { c.changes = oldChanges }() - c.changes = plans.NewChanges() - - // Build the graph. 
- graph, diags := c.Graph(GraphTypeRefresh, nil) - if diags.HasErrors() { - return nil, diags - } - - // Do the walk - _, walkDiags := c.walk(graph, walkRefresh) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return nil, diags - } - - // During our walk we will have created planned object placeholders in - // state for resource instances that are in configuration but not yet - // created. These were created only to allow expression evaluation to - // work properly in provider and data blocks during the walk and must - // now be discarded, since a subsequent plan walk is responsible for - // creating these "for real". - // TODO: Consolidate refresh and plan into a single walk, so that the - // refresh walk doesn't need to emulate various aspects of the plan - // walk in order to properly evaluate provider and data blocks. - c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() - - return c.state, diags -} - -// Stop stops the running task. -// -// Stop will block until the task completes. -func (c *Context) Stop() { - log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") - - c.l.Lock() - defer c.l.Unlock() - - // If we're running, then stop - if c.runContextCancel != nil { - log.Printf("[WARN] terraform: run context exists, stopping") - - // Tell the hook we want to stop - c.sh.Stop() - - // Stop the context - c.runContextCancel() - c.runContextCancel = nil - } - - // Grab the condition var before we exit - if cond := c.runCond; cond != nil { - log.Printf("[INFO] terraform: waiting for graceful stop to complete") - cond.Wait() - } - - log.Printf("[WARN] terraform: stop complete") -} - -// Validate performs semantic validation of the configuration, and returning -// any warnings or errors. -// -// Syntax and structural checks are performed by the configuration loader, -// and so are not repeated here. -func (c *Context) Validate() tfdiags.Diagnostics { - defer c.acquireRun("validate")() - - var diags tfdiags.Diagnostics - - // If we have errors at this point then we probably won't be able to - // construct a graph without producing redundant errors, so we'll halt early. - if diags.HasErrors() { - return diags - } - - // Build the graph so we can walk it and run Validate on nodes. - // We also validate the graph generated here, but this graph doesn't - // necessarily match the graph that Plan will generate, so we'll validate the - // graph again later after Planning. - graph, graphDiags := c.Graph(GraphTypeValidate, nil) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return diags - } - - // Walk - walker, walkDiags := c.walk(graph, walkValidate) - diags = diags.Append(walker.NonFatalDiagnostics) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return diags - } - - return diags -} - -// Config returns the configuration tree associated with this context. -func (c *Context) Config() *configs.Config { - return c.config -} - -// Variables will return the mapping of variables that were defined -// for this Context. If Input was called, this mapping may be different -// than what was given. -func (c *Context) Variables() InputValues { - return c.variables -} - -// SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k string, v cty.Value) { - c.variables[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } -} - -func (c *Context) acquireRun(phase string) func() { - // With the run lock held, grab the context lock to make changes - // to the run context. 
- c.l.Lock() - defer c.l.Unlock() - - // Wait until we're no longer running - for c.runCond != nil { - c.runCond.Wait() - } - - // Build our lock - c.runCond = sync.NewCond(&c.l) - - // Create a new run context - c.runContext, c.runContextCancel = context.WithCancel(context.Background()) - - // Reset the stop hook so we're not stopped - c.sh.Reset() - - // Reset the shadow errors - c.shadowErr = nil - - return c.releaseRun -} - -func (c *Context) releaseRun() { - // Grab the context lock so that we can make modifications to fields - c.l.Lock() - defer c.l.Unlock() - - // End our run. We check if runContext is non-nil because it can be - // set to nil if it was cancelled via Stop() - if c.runContextCancel != nil { - c.runContextCancel() - } - - // Unlock all waiting our condition - cond := c.runCond - c.runCond = nil - cond.Broadcast() - - // Unset the context - c.runContext = nil -} - -func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) { - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - - walker := c.graphWalker(operation) - - // Watch for a stop so we can call the provider Stop() API. - watchStop, watchWait := c.watchStop(walker) - - // Walk the real graph, this will block until it completes - diags := graph.Walk(walker) - - // Close the channel so the watcher stops, and wait for it to return. - close(watchStop) - <-watchWait - - return walker, diags -} - -func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker { - return &ContextGraphWalker{ - Context: c, - State: c.state.SyncWrapper(), - Changes: c.changes.SyncWrapper(), - Operation: operation, - StopContext: c.runContext, - RootVariableValues: c.variables, - } -} - -// watchStop immediately returns a `stop` and a `wait` chan after dispatching -// the watchStop goroutine. This will watch the runContext for cancellation and -// stop the providers accordingly. When the watch is no longer needed, the -// `stop` chan should be closed before waiting on the `wait` chan. -// The `wait` chan is important, because without synchronizing with the end of -// the watchStop goroutine, the runContext may also be closed during the select -// incorrectly causing providers to be stopped. Even if the graph walk is done -// at that point, stopping a provider permanently cancels its StopContext which -// can cause later actions to fail. -func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { - stop := make(chan struct{}) - wait := make(chan struct{}) - - // get the runContext cancellation channel now, because releaseRun will - // write to the runContext field. - done := c.runContext.Done() - - go func() { - defer close(wait) - // Wait for a stop or completion - select { - case <-done: - // done means the context was canceled, so we need to try and stop - // providers. - case <-stop: - // our own stop channel was closed. - return - } - - // If we're here, we're stopped, trigger the call. - log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") - - { - // Copy the providers so that a misbehaved blocking Stop doesn't - // completely hang Terraform. 
- walker.providerLock.Lock() - ps := make([]providers.Interface, 0, len(walker.providerCache)) - for _, p := range walker.providerCache { - ps = append(ps, p) - } - defer walker.providerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - - { - // Call stop on all the provisioners - walker.provisionerLock.Lock() - ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) - for _, p := range walker.provisionerCache { - ps = append(ps, p) - } - defer walker.provisionerLock.Unlock() - - for _, p := range ps { - // We ignore the error for now since there isn't any reasonable - // action to take if there is an error here, since the stop is still - // advisory: Terraform will exit once the graph node completes. - p.Stop() - } - } - }() - - return stop, wait -} - -// ShimLegacyState is a helper that takes the legacy state type and -// converts it to the new state type. -// -// This is implemented as a state file upgrade, so it will not preserve -// parts of the state structure that are not included in a serialized state, -// such as the resolved results of any local values, outputs in non-root -// modules, etc. -func ShimLegacyState(legacy *State) (*states.State, error) { - if legacy == nil { - return nil, nil - } - var buf bytes.Buffer - err := WriteState(legacy, &buf) - if err != nil { - return nil, err - } - f, err := statefile.Read(&buf) - if err != nil { - return nil, err - } - return f.State, err -} - -// MustShimLegacyState is a wrapper around ShimLegacyState that panics if -// the conversion does not succeed. This is primarily intended for tests where -// the given legacy state is an object constructed within the test. -func MustShimLegacyState(legacy *State) *states.State { - ret, err := ShimLegacyState(legacy) - if err != nil { - panic(err) - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go deleted file mode 100644 index 74ab5013..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" -) - -// contextComponentFactory is the interface that Context uses -// to initialize various components such as providers and provisioners. -// This factory gets more information than the raw maps using to initialize -// a Context. This information is used for debugging. -type contextComponentFactory interface { - // ResourceProvider creates a new ResourceProvider with the given - // type. The "uid" is a unique identifier for this provider being - // initialized that can be used for internal tracking. - ResourceProvider(typ, uid string) (providers.Interface, error) - ResourceProviders() []string - - // ResourceProvisioner creates a new ResourceProvisioner with the - // given type. The "uid" is a unique identifier for this provisioner - // being initialized that can be used for internal tracking. - ResourceProvisioner(typ, uid string) (provisioners.Interface, error) - ResourceProvisioners() []string -} - -// basicComponentFactory just calls a factory from a map directly. 
-type basicComponentFactory struct { - providers map[addrs.Provider]providers.Factory - provisioners map[string]ProvisionerFactory -} - -func (c *basicComponentFactory) ResourceProviders() []string { - var result []string - for k := range c.providers { - result = append(result, k.LegacyString()) - } - return result -} - -func (c *basicComponentFactory) ResourceProvisioners() []string { - var result []string - for k := range c.provisioners { - result = append(result, k) - } - - return result -} - -func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) { - f, ok := c.providers[addrs.NewLegacyProvider(typ)] - if !ok { - return nil, fmt.Errorf("unknown provider %q", typ) - } - - return f() -} - -func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) { - f, ok := c.provisioners[typ] - if !ok { - return nil, fmt.Errorf("unknown provisioner %q", typ) - } - - return f() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go deleted file mode 100644 index 4448d870..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go - -// GraphType is an enum of the type of graph to create with a Context. -// The values of the constants may change so they shouldn't be depended on; -// always use the constant name. -type GraphType byte - -const ( - GraphTypeInvalid GraphType = 0 - GraphTypeLegacy GraphType = iota - GraphTypeRefresh - GraphTypePlan - GraphTypePlanDestroy - GraphTypeApply - GraphTypeValidate - GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. -) - -// GraphTypeMap is a mapping of human-readable string to GraphType. This -// is useful to use as the mechanism for human input for configurable -// graph types. -var GraphTypeMap = map[string]GraphType{ - "apply": GraphTypeApply, - "plan": GraphTypePlan, - "plan-destroy": GraphTypePlanDestroy, - "refresh": GraphTypeRefresh, - "legacy": GraphTypeLegacy, - "validate": GraphTypeValidate, - "eval": GraphTypeEval, -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go deleted file mode 100644 index 313e9094..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ /dev/null @@ -1,83 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportOpts are used as the configuration for Import. -type ImportOpts struct { - // Targets are the targets to import - Targets []*ImportTarget - - // Config is optional, and specifies a config tree that will be loaded - // into the graph and evaluated. This is the source for provider - // configurations. - Config *configs.Config -} - -// ImportTarget is a single resource to import. -type ImportTarget struct { - // Addr is the address for the resource instance that the new object should - // be imported into. - Addr addrs.AbsResourceInstance - - // ID is the ID of the resource to import. This is resource-specific. - ID string - - // ProviderAddr is the address of the provider that should handle the import. 
- ProviderAddr addrs.AbsProviderConfig -} - -// Import takes already-created external resources and brings them -// under Terraform management. Import requires the exact type, name, and ID -// of the resources to import. -// -// This operation is idempotent. If the requested resource is already -// imported, no changes are made to the state. -// -// Further, this operation also gracefully handles partial state. If during -// an import there is a failure, all previously imported resources remain -// imported. -func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Hold a lock since we can modify our own state here - defer c.acquireRun("import")() - - // Copy our own state - c.state = c.state.DeepCopy() - - // If no module is given, default to the module configured with - // the Context. - config := opts.Config - if config == nil { - config = c.config - } - - // Initialize our graph builder - builder := &ImportGraphBuilder{ - ImportTargets: opts.Targets, - Config: config, - Components: c.components, - Schemas: c.schemas, - } - - // Build the graph! - graph, graphDiags := builder.Build(addrs.RootModuleInstance) - diags = diags.Append(graphDiags) - if graphDiags.HasErrors() { - return c.state, diags - } - - // Walk it - _, walkDiags := c.walk(graph, walkImport) - diags = diags.Append(walkDiags) - if walkDiags.HasErrors() { - return c.state, diags - } - - return c.state, diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_input.go b/vendor/github.com/hashicorp/terraform/terraform/context_input.go deleted file mode 100644 index acc42ae9..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/context_input.go +++ /dev/null @@ -1,183 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/tfdiags" -) - -// Input asks for input to fill unset required arguments in provider -// configurations. -// -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { - // This function used to be responsible for more than it is now, so its - // interface is more general than its current functionality requires. - // It now exists only to handle interactive prompts for provider - // configurations, with other prompts the responsibility of the CLI - // layer prior to calling in to this package. - // - // (Hopefully in future the remaining functionality here can move to the - // CLI layer too in order to avoid this odd situation where core code - // produces UI input prompts.) - - var diags tfdiags.Diagnostics - defer c.acquireRun("input")() - - if c.uiInput == nil { - log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") - return diags - } - - ctx := context.Background() - - if mode&InputModeProvider != 0 { - log.Printf("[TRACE] Context.Input: Prompting for provider arguments") - - // We prompt for input only for provider configurations defined in - // the root module. 
At the time of writing that is an arbitrary - // restriction, but we have future plans to support "count" and - // "for_each" on modules that will then prevent us from supporting - // input for child module configurations anyway (since we'd need to - // dynamic-expand first), and provider configurations in child modules - // are not recommended since v0.11 anyway, so this restriction allows - // us to keep this relatively simple without significant hardship. - - pcs := make(map[string]*configs.Provider) - pas := make(map[string]addrs.ProviderConfig) - for _, pc := range c.config.Module.ProviderConfigs { - addr := pc.Addr() - pcs[addr.String()] = pc - pas[addr.String()] = addr - log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) - } - // We also need to detect _implied_ provider configs from resources. - // These won't have *configs.Provider objects, but they will still - // exist in the map and we'll just treat them as empty below. - for _, rc := range c.config.Module.ManagedResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) - } - } - for _, rc := range c.config.Module.DataResources { - pa := rc.ProviderConfigAddr() - if pa.Alias != "" { - continue // alias configurations cannot be implied - } - if _, exists := pcs[pa.String()]; !exists { - pcs[pa.String()] = nil - pas[pa.String()] = pa - log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) - } - } - - for pk, pa := range pas { - pc := pcs[pk] // will be nil if this is an implied config - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: pk, - QueryPrefix: pk + ".", - UIInput: c.uiInput, - } - - schema := c.schemas.ProviderConfig(pa.Type.LegacyString()) - if schema == nil { - // Could either be an incorrect config or just an incomplete - // mock in tests. We'll let a later pass decide, and just - // ignore this for the purposes of gathering input. - log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type) - continue - } - - // For our purposes here we just want to detect if attrbutes are - // set in config at all, so rather than doing a full decode - // (which would require us to prepare an evalcontext, etc) we'll - // use the low-level HCL API to process only the top-level - // structure. 
- var attrExprs hcl.Attributes // nil if there is no config - if pc != nil && pc.Config != nil { - lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) - content, _, diags := pc.Config.PartialContent(lowLevelSchema) - if diags.HasErrors() { - log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) - continue - } - attrExprs = content.Attributes - } - - keys := make([]string, 0, len(schema.Attributes)) - for key := range schema.Attributes { - keys = append(keys, key) - } - sort.Strings(keys) - - vals := map[string]cty.Value{} - for _, key := range keys { - attrS := schema.Attributes[key] - if attrS.Optional { - continue - } - if attrExprs != nil { - if _, exists := attrExprs[key]; exists { - continue - } - } - if !attrS.Type.Equals(cty.String) { - continue - } - - log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) - rawVal, err := input.Input(ctx, &InputOpts{ - Id: key, - Query: key, - Description: attrS.Description, - }) - if err != nil { - log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) - continue - } - - vals[key] = cty.StringVal(rawVal) - } - - c.providerInputConfig[pk] = vals - log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) - } - } - - return diags -} - -// schemaForInputSniffing returns a transformed version of a given schema -// that marks all attributes as optional, which the Context.Input method can -// use to detect whether a required argument is set without missing arguments -// themselves generating errors. -func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { - ret := &hcl.BodySchema{ - Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), - Blocks: schema.Blocks, - } - - for i, attrS := range schema.Attributes { - ret.Attributes[i] = attrS - ret.Attributes[i].Required = false - } - - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go deleted file mode 100644 index 4e834204..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ /dev/null @@ -1,1451 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/zclconf/go-cty/cty" - - "github.com/mitchellh/copystructure" -) - -// DiffChangeType is an enum with the kind of changes a diff has planned. -type DiffChangeType byte - -const ( - DiffInvalid DiffChangeType = iota - DiffNone - DiffCreate - DiffUpdate - DiffDestroy - DiffDestroyCreate - - // DiffRefresh is only used in the UI for displaying diffs. - // Managed resource reads never appear in plan, and when data source - // reads appear they are represented as DiffCreate in core before - // transforming to DiffRefresh in the UI layer. - DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion -) - -// multiVal matches the index key to a flatmapped set, list or map -var multiVal = regexp.MustCompile(`\.(#|%)$`) - -// Diff tracks the changes that are necessary to apply a configuration -// to an existing infrastructure. -type Diff struct { - // Modules contains all the modules that have a diff - Modules []*ModuleDiff -} - -// Prune cleans out unused structures in the diff without affecting -// the behavior of the diff at all. 
-// -// This is not safe to call concurrently. This is safe to call on a -// nil Diff. -func (d *Diff) Prune() { - if d == nil { - return - } - - // Prune all empty modules - newModules := make([]*ModuleDiff, 0, len(d.Modules)) - for _, m := range d.Modules { - // If the module isn't empty, we keep it - if !m.Empty() { - newModules = append(newModules, m) - } - } - if len(newModules) == 0 { - newModules = nil - } - d.Modules = newModules -} - -// AddModule adds the module with the given path to the diff. -// -// This should be the preferred method to add module diffs since it -// allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - legacyPath := make([]string, len(path)) - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. - panic("diff cannot represent modules with count or for_each keys") - } - - legacyPath[i] = step.Name - } - - m := &ModuleDiff{Path: legacyPath} - m.init() - d.Modules = append(d.Modules, m) - return m -} - -// ModuleByPath is used to lookup the module diff for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { - if d == nil { - return nil - } - for _, mod := range d.Modules { - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// RootModule returns the ModuleState for the root module -func (d *Diff) RootModule() *ModuleDiff { - root := d.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Empty returns true if the diff has no changes. -func (d *Diff) Empty() bool { - if d == nil { - return true - } - - for _, m := range d.Modules { - if !m.Empty() { - return false - } - } - - return true -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *Diff) Equal(d2 *Diff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Sort the modules - sort.Sort(moduleDiffSort(d.Modules)) - sort.Sort(moduleDiffSort(d2.Modules)) - - // Copy since we have to modify the module destroy flag to false so - // we don't compare that. TODO: delete this when we get rid of the - // destroy flag on modules. - dCopy := d.DeepCopy() - d2Copy := d2.DeepCopy() - for _, m := range dCopy.Modules { - m.Destroy = false - } - for _, m := range d2Copy.Modules { - m.Destroy = false - } - - // Use DeepEqual - return reflect.DeepEqual(dCopy, d2Copy) -} - -// DeepCopy performs a deep copy of all parts of the Diff, making the -// resulting Diff safe to use without modifying this one. 
-func (d *Diff) DeepCopy() *Diff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*Diff) -} - -func (d *Diff) String() string { - var buf bytes.Buffer - - keys := make([]string, 0, len(d.Modules)) - lookup := make(map[string]*ModuleDiff) - for _, m := range d.Modules { - addr := normalizeModulePath(m.Path) - key := addr.String() - keys = append(keys, key) - lookup[key] = m - } - sort.Strings(keys) - - for _, key := range keys { - m := lookup[key] - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("%s:\n", key)) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return strings.TrimSpace(buf.String()) -} - -func (d *Diff) init() { - if d.Modules == nil { - rootDiff := &ModuleDiff{Path: rootModulePath} - d.Modules = []*ModuleDiff{rootDiff} - } - for _, m := range d.Modules { - m.init() - } -} - -// ModuleDiff tracks the differences between resources to apply within -// a single module. -type ModuleDiff struct { - Path []string - Resources map[string]*InstanceDiff - Destroy bool // Set only by the destroy plan -} - -func (d *ModuleDiff) init() { - if d.Resources == nil { - d.Resources = make(map[string]*InstanceDiff) - } - for _, r := range d.Resources { - r.init() - } -} - -// ChangeType returns the type of changes that the diff for this -// module includes. -// -// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or -// DiffCreate. If an instance within the module has a DiffDestroyCreate -// then this will register as a DiffCreate for a module. -func (d *ModuleDiff) ChangeType() DiffChangeType { - result := DiffNone - for _, r := range d.Resources { - change := r.ChangeType() - switch change { - case DiffCreate, DiffDestroy: - if result == DiffNone { - result = change - } - case DiffDestroyCreate, DiffUpdate: - result = DiffUpdate - } - } - - return result -} - -// Empty returns true if the diff has no changes within this module. -func (d *ModuleDiff) Empty() bool { - if d.Destroy { - return false - } - - if len(d.Resources) == 0 { - return true - } - - for _, rd := range d.Resources { - if !rd.Empty() { - return false - } - } - - return true -} - -// Instances returns the instance diffs for the id given. This can return -// multiple instance diffs if there are counts within the resource. -func (d *ModuleDiff) Instances(id string) []*InstanceDiff { - var result []*InstanceDiff - for k, diff := range d.Resources { - if k == id || strings.HasPrefix(k, id+".") { - if !diff.Empty() { - result = append(result, diff) - } - } - } - - return result -} - -// IsRoot says whether or not this module diff is for the root module. -func (d *ModuleDiff) IsRoot() bool { - return reflect.DeepEqual(d.Path, rootModulePath) -} - -// String outputs the diff in a long but command-line friendly output -// format that users can read to quickly inspect a diff. 
-func (d *ModuleDiff) String() string { - var buf bytes.Buffer - - names := make([]string, 0, len(d.Resources)) - for name, _ := range d.Resources { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - rdiff := d.Resources[name] - - crud := "UPDATE" - switch { - case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): - crud = "DESTROY/CREATE" - case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): - crud = "DESTROY" - case rdiff.RequiresNew(): - crud = "CREATE" - } - - extra := "" - if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { - extra = " (deposed only)" - } - - buf.WriteString(fmt.Sprintf( - "%s: %s%s\n", - crud, - name, - extra)) - - keyLen := 0 - rdiffAttrs := rdiff.CopyAttributes() - keys := make([]string, 0, len(rdiffAttrs)) - for key, _ := range rdiffAttrs { - if key == "id" { - continue - } - - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(keys) - - for _, attrK := range keys { - attrDiff, _ := rdiff.GetAttribute(attrK) - - v := attrDiff.New - u := attrDiff.Old - if attrDiff.NewComputed { - v = "" - } - - if attrDiff.Sensitive { - u = "" - v = "" - } - - updateMsg := "" - if attrDiff.RequiresNew { - updateMsg = " (forces new resource)" - } else if attrDiff.Sensitive { - updateMsg = " (attribute changed)" - } - - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } - } - - return buf.String() -} - -// InstanceDiff is the diff of a resource from some state to another. -type InstanceDiff struct { - mu sync.Mutex - Attributes map[string]*ResourceAttrDiff - Destroy bool - DestroyDeposed bool - DestroyTainted bool - - // Meta is a simple K/V map that is stored in a diff and persisted to - // plans but otherwise is completely ignored by Terraform core. It is - // meant to be used for additional data a resource may want to pass through. - // The value here must only contain Go primitives and collections. - Meta map[string]interface{} -} - -func (d *InstanceDiff) Lock() { d.mu.Lock() } -func (d *InstanceDiff) Unlock() { d.mu.Unlock() } - -// ApplyToValue merges the receiver into the given base value, returning a -// new value that incorporates the planned changes. The given value must -// conform to the given schema, or this method will panic. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. -func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { - // Create an InstanceState attributes from our existing state. - // We can use this to more easily apply the diff changes. - attrs := hcl2shim.FlatmapValueFromHCL2(base) - applied, err := d.Apply(attrs, schema) - if err != nil { - return base, err - } - - val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) - if err != nil { - return base, err - } - - return schema.CoerceValue(val) -} - -// Apply applies the diff to the provided flatmapped attributes, -// returning the new instance attributes. -// -// This method is intended for shimming old subsystems that still use this -// legacy diff type to work with the new-style types. 
-func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - // We always build a new value here, even if the given diff is "empty", - // because we might be planning to create a new instance that happens - // to have no attributes set, and so we want to produce an empty object - // rather than just echoing back the null old value. - if attrs == nil { - attrs = map[string]string{} - } - - // Rather applying the diff to mutate the attrs, we'll copy new values into - // here to avoid the possibility of leaving stale values. - result := map[string]string{} - - if d.Destroy || d.DestroyDeposed || d.DestroyTainted { - return result, nil - } - - return d.applyBlockDiff(nil, attrs, schema) -} - -func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - result := map[string]string{} - name := "" - if len(path) > 0 { - name = path[len(path)-1] - } - - // localPrefix is used to build the local result map - localPrefix := "" - if name != "" { - localPrefix = name + "." - } - - // iterate over the schema rather than the attributes, so we can handle - // different block types separately from plain attributes - for n, attrSchema := range schema.Attributes { - var err error - newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) - - if err != nil { - return result, err - } - - for k, v := range newAttrs { - result[localPrefix+k] = v - } - } - - blockPrefix := strings.Join(path, ".") - if blockPrefix != "" { - blockPrefix += "." - } - for n, block := range schema.BlockTypes { - // we need to find the set of all keys that traverse this block - candidateKeys := map[string]bool{} - blockKey := blockPrefix + n + "." - localBlockPrefix := localPrefix + n + "." - - // we can only trust the diff for sets, since the path changes, so don't - // count existing values as candidate keys. If it turns out we're - // keeping the attributes, we will catch it down below with "keepBlock" - // after we check the set count. - if block.Nesting != configschema.NestingSet { - for k := range attrs { - if strings.HasPrefix(k, blockKey) { - nextDot := strings.Index(k[len(blockKey):], ".") - if nextDot < 0 { - continue - } - nextDot += len(blockKey) - candidateKeys[k[len(blockKey):nextDot]] = true - } - } - } - - for k, diff := range d.Attributes { - // helper/schema should not insert nil diff values, but don't panic - // if it does. - if diff == nil { - continue - } - - if strings.HasPrefix(k, blockKey) { - nextDot := strings.Index(k[len(blockKey):], ".") - if nextDot < 0 { - continue - } - - if diff.NewRemoved { - continue - } - - nextDot += len(blockKey) - candidateKeys[k[len(blockKey):nextDot]] = true - } - } - - // check each set candidate to see if it was removed. - // we need to do this, because when entire sets are removed, they may - // have the wrong key, and ony show diffs going to "" - if block.Nesting == configschema.NestingSet { - for k := range candidateKeys { - indexPrefix := strings.Join(append(path, n, k), ".") + "." - keep := false - // now check each set element to see if it's a new diff, or one - // that we're dropping. Since we're only applying the "New" - // portion of the set, we can ignore diffs that only contain "Old" - for attr, diff := range d.Attributes { - // helper/schema should not insert nil diff values, but don't panic - // if it does. 
- if diff == nil { - continue - } - - if !strings.HasPrefix(attr, indexPrefix) { - continue - } - - // check for empty "count" keys - if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { - continue - } - - // removed items don't count either - if diff.NewRemoved { - continue - } - - // this must be a diff to keep - keep = true - break - } - if !keep { - delete(candidateKeys, k) - } - } - } - - for k := range candidateKeys { - newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) - if err != nil { - return result, err - } - - for attr, v := range newAttrs { - result[localBlockPrefix+attr] = v - } - } - - keepBlock := true - // check this block's count diff directly first, since we may not - // have candidates because it was removed and only set to "0" - if diff, ok := d.Attributes[blockKey+"#"]; ok { - if diff.New == "0" || diff.NewRemoved { - keepBlock = false - } - } - - // if there was no diff at all, then we need to keep the block attributes - if len(candidateKeys) == 0 && keepBlock { - for k, v := range attrs { - if strings.HasPrefix(k, blockKey) { - // we need the key relative to this block, so remove the - // entire prefix, then re-insert the block name. - localKey := localBlockPrefix + k[len(blockKey):] - result[localKey] = v - } - } - } - - countAddr := strings.Join(append(path, n, "#"), ".") - if countDiff, ok := d.Attributes[countAddr]; ok { - if countDiff.NewComputed { - result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue - } else { - result[localBlockPrefix+"#"] = countDiff.New - - // While sets are complete, list are not, and we may not have all the - // information to track removals. If the list was truncated, we need to - // remove the extra items from the result. - if block.Nesting == configschema.NestingList && - countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { - length, _ := strconv.Atoi(countDiff.New) - for k := range result { - if !strings.HasPrefix(k, localBlockPrefix) { - continue - } - - index := k[len(localBlockPrefix):] - nextDot := strings.Index(index, ".") - if nextDot < 1 { - continue - } - index = index[:nextDot] - i, err := strconv.Atoi(index) - if err != nil { - // this shouldn't happen since we added these - // ourself, but make note of it just in case. 
- log.Printf("[ERROR] bad list index in %q: %s", k, err) - continue - } - if i >= length { - delete(result, k) - } - } - } - } - } else if origCount, ok := attrs[countAddr]; ok && keepBlock { - result[localBlockPrefix+"#"] = origCount - } else { - result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) - } - } - - return result, nil -} - -func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - ty := attrSchema.Type - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): - return d.applyCollectionDiff(path, attrs, attrSchema) - case ty.IsSetType(): - return d.applySetDiff(path, attrs, attrSchema) - default: - return d.applySingleAttrDiff(path, attrs, attrSchema) - } -} - -func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - currentKey := strings.Join(path, ".") - - attr := path[len(path)-1] - - result := map[string]string{} - diff := d.Attributes[currentKey] - old, exists := attrs[currentKey] - - if diff != nil && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - // "id" must exist and not be an empty string, or it must be unknown. - // This only applied to top-level "id" fields. - if attr == "id" && len(path) == 1 { - if old == "" { - result[attr] = hcl2shim.UnknownVariableValue - } else { - result[attr] = old - } - return result, nil - } - - // attribute diffs are sometimes missed, so assume no diff means keep the - // old value - if diff == nil { - if exists { - result[attr] = old - } else { - // We need required values, so set those with an empty value. It - // must be set in the config, since if it were missing it would have - // failed validation. - if attrSchema.Required { - // we only set a missing string here, since bool or number types - // would have distinct zero value which shouldn't have been - // lost. - if attrSchema.Type == cty.String { - result[attr] = "" - } - } - } - return result, nil - } - - // check for missmatched diff values - if exists && - old != diff.Old && - old != hcl2shim.UnknownVariableValue && - diff.Old != hcl2shim.UnknownVariableValue { - return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) - } - - if diff.NewRemoved { - // don't set anything in the new value - return map[string]string{}, nil - } - - if diff.Old == diff.New && diff.New == "" { - // this can only be a valid empty string - if attrSchema.Type == cty.String { - result[attr] = "" - } - return result, nil - } - - if attrSchema.Computed && diff.NewComputed { - result[attr] = hcl2shim.UnknownVariableValue - return result, nil - } - - result[attr] = diff.New - - return result, nil -} - -func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - result := map[string]string{} - - prefix := "" - if len(path) > 1 { - prefix = strings.Join(path[:len(path)-1], ".") + "." 
- } - - name := "" - if len(path) > 0 { - name = path[len(path)-1] - } - - currentKey := prefix + name - - // check the index first for special handling - for k, diff := range d.Attributes { - // check the index value, which can be set, and 0 - if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { - if diff.NewRemoved { - return result, nil - } - - if diff.NewComputed { - result[k[len(prefix):]] = hcl2shim.UnknownVariableValue - return result, nil - } - - // do what the diff tells us to here, so that it's consistent with applies - if diff.New == "0" { - result[k[len(prefix):]] = "0" - return result, nil - } - } - } - - // collect all the keys from the diff and the old state - noDiff := true - keys := map[string]bool{} - for k := range d.Attributes { - if !strings.HasPrefix(k, currentKey+".") { - continue - } - noDiff = false - keys[k] = true - } - - noAttrs := true - for k := range attrs { - if !strings.HasPrefix(k, currentKey+".") { - continue - } - noAttrs = false - keys[k] = true - } - - // If there's no diff and no attrs, then there's no value at all. - // This prevents an unexpected zero-count attribute in the attributes. - if noDiff && noAttrs { - return result, nil - } - - idx := "#" - if attrSchema.Type.IsMapType() { - idx = "%" - } - - for k := range keys { - // generate an schema placeholder for the values - elSchema := &configschema.Attribute{ - Type: attrSchema.Type.ElementType(), - } - - res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) - if err != nil { - return result, err - } - - for k, v := range res { - result[name+"."+k] = v - } - } - - // Just like in nested list blocks, for simple lists we may need to fill in - // missing empty strings. - countKey := name + "." + idx - count := result[countKey] - length, _ := strconv.Atoi(count) - - if count != "" && count != hcl2shim.UnknownVariableValue && - attrSchema.Type.Equals(cty.List(cty.String)) { - // insert empty strings into missing indexes - for i := 0; i < length; i++ { - key := fmt.Sprintf("%s.%d", name, i) - if _, ok := result[key]; !ok { - result[key] = "" - } - } - } - - // now check for truncation in any type of list - if attrSchema.Type.IsListType() { - for key := range result { - if key == countKey { - continue - } - - if len(key) <= len(name)+1 { - // not sure what this is, but don't panic - continue - } - - index := key[len(name)+1:] - - // It is possible to have nested sets or maps, so look for another dot - dot := strings.Index(index, ".") - if dot > 0 { - index = index[:dot] - } - - // This shouldn't have any more dots, since the element type is only string. - num, err := strconv.Atoi(index) - if err != nil { - log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) - continue - } - - if num >= length { - delete(result, key) - } - } - } - - // Fill in the count value if it wasn't present in the diff for some reason, - // or if there is no count at all. - _, countDiff := d.Attributes[countKey] - if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { - result[countKey] = countFlatmapContainerValues(countKey, result) - } - - return result, nil -} - -func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { - // We only need this special behavior for sets of object. - if !attrSchema.Type.ElementType().IsObjectType() { - // The normal collection apply behavior will work okay for this one, then. 
- return d.applyCollectionDiff(path, attrs, attrSchema) - } - - // When we're dealing with a set of an object type we actually want to - // use our normal _block type_ apply behaviors, so we'll construct ourselves - // a synthetic schema that treats the object type as a block type and - // then delegate to our block apply method. - synthSchema := &configschema.Block{ - Attributes: make(map[string]*configschema.Attribute), - } - - for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { - // We can safely make everything into an attribute here because in the - // event that there are nested set attributes we'll end up back in - // here again recursively and can then deal with the next level of - // expansion. - synthSchema.Attributes[name] = &configschema.Attribute{ - Type: ty, - Optional: true, - } - } - - parentPath := path[:len(path)-1] - childName := path[len(path)-1] - containerSchema := &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - childName: { - Nesting: configschema.NestingSet, - Block: *synthSchema, - }, - }, - } - - return d.applyBlockDiff(parentPath, attrs, containerSchema) -} - -// countFlatmapContainerValues returns the number of values in the flatmapped container -// (set, map, list) indexed by key. The key argument is expected to include the -// trailing ".#", or ".%". -func countFlatmapContainerValues(key string, attrs map[string]string) string { - if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - panic(fmt.Sprintf("invalid index value %q", key)) - } - - prefix := key[:len(key)-1] - items := map[string]int{} - - for k := range attrs { - if k == key { - continue - } - if !strings.HasPrefix(k, prefix) { - continue - } - - suffix := k[len(prefix):] - dot := strings.Index(suffix, ".") - if dot > 0 { - suffix = suffix[:dot] - } - - items[suffix]++ - } - return strconv.Itoa(len(items)) -} - -// ResourceAttrDiff is the diff of a single attribute of a resource. -type ResourceAttrDiff struct { - Old string // Old Value - New string // New Value - NewComputed bool // True if new value is computed (unknown currently) - NewRemoved bool // True if this attribute is being removed - NewExtra interface{} // Extra information for the provider - RequiresNew bool // True if change requires new resource - Sensitive bool // True if the data should not be displayed in UI output - Type DiffAttrType -} - -// Empty returns true if the diff for this attr is neutral -func (d *ResourceAttrDiff) Empty() bool { - return d.Old == d.New && !d.NewComputed && !d.NewRemoved -} - -func (d *ResourceAttrDiff) GoString() string { - return fmt.Sprintf("*%#v", *d) -} - -// DiffAttrType is an enum type that says whether a resource attribute -// diff is an input attribute (comes from the configuration) or an -// output attribute (comes as a result of applying the configuration). An -// example input would be "ami" for AWS and an example output would be -// "private_ip". 
-type DiffAttrType byte - -const ( - DiffAttrUnknown DiffAttrType = iota - DiffAttrInput - DiffAttrOutput -) - -func (d *InstanceDiff) init() { - if d.Attributes == nil { - d.Attributes = make(map[string]*ResourceAttrDiff) - } -} - -func NewInstanceDiff() *InstanceDiff { - return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} -} - -func (d *InstanceDiff) Copy() (*InstanceDiff, error) { - if d == nil { - return nil, nil - } - - dCopy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - return nil, err - } - - return dCopy.(*InstanceDiff), nil -} - -// ChangeType returns the DiffChangeType represented by the diff -// for this single instance. -func (d *InstanceDiff) ChangeType() DiffChangeType { - if d.Empty() { - return DiffNone - } - - if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { - return DiffDestroyCreate - } - - if d.GetDestroy() || d.GetDestroyDeposed() { - return DiffDestroy - } - - if d.RequiresNew() { - return DiffCreate - } - - return DiffUpdate -} - -// Empty returns true if this diff encapsulates no changes. -func (d *InstanceDiff) Empty() bool { - if d == nil { - return true - } - - d.mu.Lock() - defer d.mu.Unlock() - return !d.Destroy && - !d.DestroyTainted && - !d.DestroyDeposed && - len(d.Attributes) == 0 -} - -// Equal compares two diffs for exact equality. -// -// This is different from the Same comparison that is supported which -// checks for operation equality taking into account computed values. Equal -// instead checks for exact equality. -func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { - // If one is nil, they must both be nil - if d == nil || d2 == nil { - return d == d2 - } - - // Use DeepEqual - return reflect.DeepEqual(d, d2) -} - -// DeepCopy performs a deep copy of all parts of the InstanceDiff -func (d *InstanceDiff) DeepCopy() *InstanceDiff { - copy, err := copystructure.Config{Lock: true}.Copy(d) - if err != nil { - panic(err) - } - - return copy.(*InstanceDiff) -} - -func (d *InstanceDiff) GoString() string { - return fmt.Sprintf("*%#v", InstanceDiff{ - Attributes: d.Attributes, - Destroy: d.Destroy, - DestroyTainted: d.DestroyTainted, - DestroyDeposed: d.DestroyDeposed, - }) -} - -// RequiresNew returns true if the diff requires the creation of a new -// resource (implying the destruction of the old). -func (d *InstanceDiff) RequiresNew() bool { - if d == nil { - return false - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.requiresNew() -} - -func (d *InstanceDiff) requiresNew() bool { - if d == nil { - return false - } - - if d.DestroyTainted { - return true - } - - for _, rd := range d.Attributes { - if rd != nil && rd.RequiresNew { - return true - } - } - - return false -} - -func (d *InstanceDiff) GetDestroyDeposed() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyDeposed -} - -func (d *InstanceDiff) SetDestroyDeposed(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyDeposed = b -} - -// These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within the terraform package. 
-// TODO refactor the locking scheme -func (d *InstanceDiff) SetTainted(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.DestroyTainted = b -} - -func (d *InstanceDiff) GetDestroyTainted() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.DestroyTainted -} - -func (d *InstanceDiff) SetDestroy(b bool) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Destroy = b -} - -func (d *InstanceDiff) GetDestroy() bool { - d.mu.Lock() - defer d.mu.Unlock() - - return d.Destroy -} - -func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { - d.mu.Lock() - defer d.mu.Unlock() - - d.Attributes[key] = attr -} - -func (d *InstanceDiff) DelAttribute(key string) { - d.mu.Lock() - defer d.mu.Unlock() - - delete(d.Attributes, key) -} - -func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { - d.mu.Lock() - defer d.mu.Unlock() - - attr, ok := d.Attributes[key] - return attr, ok -} -func (d *InstanceDiff) GetAttributesLen() int { - d.mu.Lock() - defer d.mu.Unlock() - - return len(d.Attributes) -} - -// Safely copies the Attributes map -func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { - d.mu.Lock() - defer d.mu.Unlock() - - attrs := make(map[string]*ResourceAttrDiff) - for k, v := range d.Attributes { - attrs[k] = v - } - - return attrs -} - -// Same checks whether or not two InstanceDiff's are the "same". When -// we say "same", it is not necessarily exactly equal. Instead, it is -// just checking that the same attributes are changing, a destroy -// isn't suddenly happening, etc. -func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { - // we can safely compare the pointers without a lock - switch { - case d == nil && d2 == nil: - return true, "" - case d == nil || d2 == nil: - return false, "one nil" - case d == d2: - return true, "" - } - - d.mu.Lock() - defer d.mu.Unlock() - - // If we're going from requiring new to NOT requiring new, then we have - // to see if all required news were computed. If so, it is allowed since - // computed may also mean "same value and therefore not new". - oldNew := d.requiresNew() - newNew := d2.RequiresNew() - if oldNew && !newNew { - oldNew = false - - // This section builds a list of ignorable attributes for requiresNew - // by removing off any elements of collections going to zero elements. - // For collections going to zero, they may not exist at all in the - // new diff (and hence RequiresNew == false). - ignoreAttrs := make(map[string]struct{}) - for k, diffOld := range d.Attributes { - if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { - continue - } - - // This case is in here as a protection measure. The bug that this - // code originally fixed (GH-11349) didn't have to deal with computed - // so I'm not 100% sure what the correct behavior is. Best to leave - // the old behavior. - if diffOld.NewComputed { - continue - } - - // We're looking for the case a map goes to exactly 0. - if diffOld.New != "0" { - continue - } - - // Found it! Ignore all of these. The prefix here is stripping - // off the "%" so it is just "k." - prefix := k[:len(k)-1] - for k2, _ := range d.Attributes { - if strings.HasPrefix(k2, prefix) { - ignoreAttrs[k2] = struct{}{} - } - } - } - - for k, rd := range d.Attributes { - if _, ok := ignoreAttrs[k]; ok { - continue - } - - // If the field is requires new and NOT computed, then what - // we have is a diff mismatch for sure. We set that the old - // diff does REQUIRE a ForceNew. 
- if rd != nil && rd.RequiresNew && !rd.NewComputed { - oldNew = true - break - } - } - } - - if oldNew != newNew { - return false, fmt.Sprintf( - "diff RequiresNew; old: %t, new: %t", oldNew, newNew) - } - - // Verify that destroy matches. The second boolean here allows us to - // have mismatching Destroy if we're moving from RequiresNew true - // to false above. Therefore, the second boolean will only pass if - // we're moving from Destroy: true to false as well. - if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { - return false, fmt.Sprintf( - "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) - } - - // Go through the old diff and make sure the new diff has all the - // same attributes. To start, build up the check map to be all the keys. - checkOld := make(map[string]struct{}) - checkNew := make(map[string]struct{}) - for k, _ := range d.Attributes { - checkOld[k] = struct{}{} - } - for k, _ := range d2.CopyAttributes() { - checkNew[k] = struct{}{} - } - - // Make an ordered list so we are sure the approximated hashes are left - // to process at the end of the loop - keys := make([]string, 0, len(d.Attributes)) - for k, _ := range d.Attributes { - keys = append(keys, k) - } - sort.StringSlice(keys).Sort() - - for _, k := range keys { - diffOld := d.Attributes[k] - - if _, ok := checkOld[k]; !ok { - // We're not checking this key for whatever reason (see where - // check is modified). - continue - } - - // Remove this key since we'll never hit it again - delete(checkOld, k) - delete(checkNew, k) - - _, ok := d2.GetAttribute(k) - if !ok { - // If there's no new attribute, and the old diff expected the attribute - // to be removed, that's just fine. - if diffOld.NewRemoved { - continue - } - - // If the last diff was a computed value then the absense of - // that value is allowed since it may mean the value ended up - // being the same. - if diffOld.NewComputed { - ok = true - } - - // No exact match, but maybe this is a set containing computed - // values. So check if there is an approximate hash in the key - // and if so, try to match the key. - if strings.Contains(k, "~") { - parts := strings.Split(k, ".") - parts2 := append([]string(nil), parts...) - - re := regexp.MustCompile(`^~\d+$`) - for i, part := range parts { - if re.MatchString(part) { - // we're going to consider this the base of a - // computed hash, and remove all longer matching fields - ok = true - - parts2[i] = `\d+` - parts2 = parts2[:i+1] - break - } - } - - re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) - if err != nil { - return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) - } - - for k2, _ := range checkNew { - if re.MatchString(k2) { - delete(checkNew, k2) - } - } - } - - // This is a little tricky, but when a diff contains a computed - // list, set, or map that can only be interpolated after the apply - // command has created the dependent resources, it could turn out - // that the result is actually the same as the existing state which - // would remove the key from the diff. - if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - // Similarly, in a RequiresNew scenario, a list that shows up in the plan - // diff can disappear from the apply diff, which is calculated from an - // empty state. 
- if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { - ok = true - } - - if !ok { - return false, fmt.Sprintf("attribute mismatch: %s", k) - } - } - - // search for the suffix of the base of a [computed] map, list or set. - match := multiVal.FindStringSubmatch(k) - - if diffOld.NewComputed && len(match) == 2 { - matchLen := len(match[1]) - - // This is a computed list, set, or map, so remove any keys with - // this prefix from the check list. - kprefix := k[:len(k)-matchLen] - for k2, _ := range checkOld { - if strings.HasPrefix(k2, kprefix) { - delete(checkOld, k2) - } - } - for k2, _ := range checkNew { - if strings.HasPrefix(k2, kprefix) { - delete(checkNew, k2) - } - } - } - - // We don't compare the values because we can't currently actually - // guarantee to generate the same value two two diffs created from - // the same state+config: we have some pesky interpolation functions - // that do not behave as pure functions (uuid, timestamp) and so they - // can be different each time a diff is produced. - // FIXME: Re-organize our config handling so that we don't re-evaluate - // expressions when we produce a second comparison diff during - // apply (for EvalCompareDiff). - } - - // Check for leftover attributes - if len(checkNew) > 0 { - extras := make([]string, 0, len(checkNew)) - for attr, _ := range checkNew { - extras = append(extras, attr) - } - return false, - fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) - } - - return true, "" -} - -// moduleDiffSort implements sort.Interface to sort module diffs by path. -type moduleDiffSort []*ModuleDiff - -func (s moduleDiffSort) Len() int { return len(s) } -func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s moduleDiffSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go deleted file mode 100644 index bc9d638a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go +++ /dev/null @@ -1,17 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// DestroyEdge is an edge that represents a standard "destroy" relationship: -// Target depends on Source because Source is destroying. -type DestroyEdge struct { - S, T dag.Vertex -} - -func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) } -func (e *DestroyEdge) Source() dag.Vertex { return e.S } -func (e *DestroyEdge) Target() dag.Vertex { return e.T } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go deleted file mode 100644 index 48ed3533..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalNode is the interface that must be implemented by graph nodes to -// evaluate/execute. -type EvalNode interface { - // Eval evaluates this node with the given context. The second parameter - // are the argument values. These will match in order and 1-1 with the - // results of the Args() return value. 
- Eval(EvalContext) (interface{}, error) -} - -// GraphNodeEvalable is the interface that graph nodes must implement -// to enable valuation. -type GraphNodeEvalable interface { - EvalTree() EvalNode -} - -// EvalEarlyExitError is a special error return value that can be returned -// by eval nodes that does an early exit. -type EvalEarlyExitError struct{} - -func (EvalEarlyExitError) Error() string { return "early exit" } - -// Eval evaluates the given EvalNode with the given context, properly -// evaluating all args in the correct order. -func Eval(n EvalNode, ctx EvalContext) (interface{}, error) { - // Call the lower level eval which doesn't understand early exit, - // and if we early exit, it isn't an error. - result, err := EvalRaw(n, ctx) - if err != nil { - if _, ok := err.(EvalEarlyExitError); ok { - return nil, nil - } - } - - return result, err -} - -// EvalRaw is like Eval except that it returns all errors, even if they -// signal something normal such as EvalEarlyExitError. -func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { - path := "unknown" - if ctx != nil { - path = ctx.Path().String() - } - if path == "" { - path = "" - } - - log.Printf("[TRACE] %s: eval: %T", path, n) - output, err := n.Eval(ctx) - if err != nil { - switch err.(type) { - case EvalEarlyExitError: - log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err) - case tfdiags.NonFatalError: - log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err) - default: - log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) - } - } - - return output, err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go deleted file mode 100644 index 2e3cdfd4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go +++ /dev/null @@ -1,673 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalApply is an EvalNode implementation that writes the diff to -// the full diff. 
-type EvalApply struct { - Addr addrs.ResourceInstance - Config *configs.Resource - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - CreateNew *bool - Error *error -} - -// TODO: test -func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - change := *n.Change - provider := *n.Provider - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - if state == nil { - state = &states.ResourceInstanceObject{} - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - if n.CreateNew != nil { - *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) - } - - configVal := cty.NullVal(cty.DynamicPseudoType) - if n.Config != nil { - var configDiags tfdiags.Diagnostics - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - - if !configVal.IsWhollyKnown() { - return nil, fmt.Errorf( - "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", - absAddr, - ) - } - - log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) - resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: change.Before, - Config: configVal, - PlannedState: change.After, - PlannedPrivate: change.Private, - }) - applyDiags := resp.Diagnostics - if n.Config != nil { - applyDiags = applyDiags.InConfigBody(n.Config.Config) - } - diags = diags.Append(applyDiags) - - // Even if there are errors in the returned diagnostics, the provider may - // have returned a _partial_ state for an object that already exists but - // failed to fully configure, and so the remaining code must always run - // to completion but must be defensive against the new value being - // incomplete. - newVal := resp.NewState - - if newVal == cty.NilVal { - // Providers are supposed to return a partial new value even when errors - // occur, but sometimes they don't and so in that case we'll patch that up - // by just using the prior state, so we'll at least keep track of the - // object for the user to retry. - newVal = change.Before - - // As a special case, we'll set the new value to null if it looks like - // we were trying to execute a delete, because the provider in this case - // probably left the newVal unset intending it to be interpreted as "null". - if change.After.IsNull() { - newVal = cty.NullVal(schema.ImpliedType()) - } - - // Ideally we'd produce an error or warning here if newVal is nil and - // there are no errors in diags, because that indicates a buggy - // provider not properly reporting its result, but unfortunately many - // of our historical test mocks behave in this way and so producing - // a diagnostic here fails hundreds of tests. Instead, we must just - // silently retain the old value for now. 
Returning a nil value with - // no errors is still always considered a bug in the provider though, - // and should be fixed for any "real" providers that do it. - } - - var conformDiags tfdiags.Diagnostics - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - conformDiags = conformDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - diags = diags.Append(conformDiags) - if conformDiags.HasErrors() { - // Bail early in this particular case, because an object that doesn't - // conform to the schema can't be saved in the state anyway -- the - // serializer will reject it. - return nil, diags.Err() - } - - // After this point we have a type-conforming result object and so we - // must always run to completion to ensure it can be saved. If n.Error - // is set then we must not return a non-nil error, in order to allow - // evaluation to continue to a later point where our state object will - // be saved. - - // By this point there must not be any unknown values remaining in our - // object, because we've applied the change and we can't save unknowns - // in our persistent state. If any are present then we will indicate an - // error (which is always a bug in the provider) but we will also replace - // them with nulls so that we can successfully save the portions of the - // returned value that are known. - if !newVal.IsWhollyKnown() { - // To generate better error messages, we'll go for a walk through the - // value and make a separate diagnostic for each unknown value we - // find. - cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { - if !val.IsKnown() { - pathStr := tfdiags.FormatCtyPath(path) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", - n.Addr.Absolute(ctx.Path()), pathStr, - ), - )) - } - return true, nil - }) - - // NOTE: This operation can potentially be lossy if there are multiple - // elements in a set that differ only by unknown values: after - // replacing with null these will be merged together into a single set - // element. Since we can only get here in the presence of a provider - // bug, we accept this because storing a result here is always a - // best-effort sort of thing. - newVal = cty.UnknownAsNull(newVal) - } - - if change.Action != plans.Delete && !diags.HasErrors() { - // Only values that were marked as unknown in the planned value are allowed - // to change during the apply operation. (We do this after the unknown-ness - // check above so that we also catch anything that became unknown after - // being known during plan.) - // - // If we are returning other errors anyway then we'll give this - // a pass since the other errors are usually the explanation for - // this one and so it's more helpful to let the user focus on the - // root cause rather than distract with this extra problem. 
- if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - - // The sort of inconsistency we won't catch here is if a known value - // in the plan is changed during apply. That can cause downstream - // problems because a dependent resource would make its own plan based - // on the planned value, and thus get a different result during the - // apply phase. This will usually lead to a "Provider produced invalid plan" - // error that incorrectly blames the downstream resource for the change. - - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent result after apply", - fmt.Sprintf( - "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), - ), - )) - } - } - } - } - - // If a provider returns a null or non-null object at the wrong time then - // we still want to save that but it often causes some confusing behaviors - // where it seems like Terraform is failing to take any action at all, - // so we'll generate some errors to draw attention to it. - if !diags.HasErrors() { - if change.Action == plans.Delete && !newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - if change.Action != plans.Delete && newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - } - - newStatus := states.ObjectReady - - // Sometimes providers return a null value when an operation fails for some - // reason, but we'd rather keep the prior state so that the error can be - // corrected on a subsequent run. We must only do this for null new value - // though, or else we may discard partial updates the provider was able to - // complete. 
- if diags.HasErrors() && newVal.IsNull() { - // Otherwise, we'll continue but using the prior state as the new value, - // making this effectively a no-op. If the item really _has_ been - // deleted then our next refresh will detect that and fix it up. - // If change.Action is Create then change.Before will also be null, - // which is fine. - newVal = change.Before - - // If we're recovering the previous state, we also want to restore the - // the tainted status of the object. - if state.Status == states.ObjectTainted { - newStatus = states.ObjectTainted - } - } - - var newState *states.ResourceInstanceObject - if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case - newState = &states.ResourceInstanceObject{ - Status: newStatus, - Value: newVal, - Private: resp.Private, - } - } - - // Write the final state - if n.Output != nil { - *n.Output = newState - } - - if diags.HasErrors() { - // If the caller provided an error pointer then they are expected to - // handle the error some other way and we treat our own result as - // success. - if n.Error != nil { - err := diags.Err() - *n.Error = err - log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) - return nil, nil - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalApplyPre is an EvalNode implementation that does the pre-Apply work -type EvalApplyPre struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - change := *n.Change - absAddr := n.Addr.Absolute(ctx.Path()) - - if change == nil { - panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) - } - - if resourceHasUserVisibleApply(n.Addr) { - priorState := change.Before - plannedNewState := change.After - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPost is an EvalNode implementation that does the post-Apply work -type EvalApplyPost struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Error *error -} - -// TODO: test -func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - if resourceHasUserVisibleApply(n.Addr) { - absAddr := n.Addr.Absolute(ctx.Path()) - var newState cty.Value - if state != nil { - newState = state.Value - } else { - newState = cty.NullVal(cty.DynamicPseudoType) - } - var err error - if n.Error != nil { - err = *n.Error - } - - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(absAddr, n.Gen, newState, err) - }) - if hookErr != nil { - return nil, hookErr - } - } - - return nil, *n.Error -} - -// EvalMaybeTainted is an EvalNode that takes the planned change, new value, -// and possible error from an apply operation and produces a new instance -// object marked as tainted if it appears that a create operation has failed. -// -// This EvalNode never returns an error, to ensure that a subsequent EvalNode -// can still record the possibly-tainted object in the state. 
-type EvalMaybeTainted struct { - Addr addrs.ResourceInstance - Gen states.Generation - Change **plans.ResourceInstanceChange - State **states.ResourceInstanceObject - Error *error -} - -func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil || n.Change == nil || n.Error == nil { - return nil, nil - } - - state := *n.State - change := *n.Change - err := *n.Error - - // nothing to do if everything went as planned - if err == nil { - return nil, nil - } - - if state != nil && state.Status == states.ObjectTainted { - log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) - return nil, nil - } - - if change.Action == plans.Create { - // If there are errors during a _create_ then the object is - // in an undefined state, and so we'll mark it as tainted so - // we can try again on the next run. - // - // We don't do this for other change actions because errors - // during updates will often not change the remote object at all. - // If there _were_ changes prior to the error, it's the provider's - // responsibility to record the effect of those changes in the - // object value it returned. - log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) - *n.State = state.AsTainted() - } - - return nil, nil -} - -// resourceHasUserVisibleApply returns true if the given resource is one where -// apply actions should be exposed to the user. -// -// Certain resources do apply actions only as an implementation detail, so -// these should not be advertised to code outside of this package. -func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { - // Only managed resources have user-visible apply actions. - // In particular, this excludes data resources since we "apply" these - // only as an implementation detail of removing them from state when - // they are destroyed. (When reading, they don't get here at all because - // we present them as "Refresh" actions.) - return addr.ContainingResource().Mode == addrs.ManagedResourceMode -} - -// EvalApplyProvisioners is an EvalNode implementation that executes -// the provisioners for a resource. -// -// TODO(mitchellh): This should probably be split up into a more fine-grained -// ApplyProvisioner (single) that is looped over. -type EvalApplyProvisioners struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject - ResourceConfig *configs.Resource - CreateNew *bool - Error *error - - // When is the type of provisioner to run at this point - When configs.ProvisionerWhen -} - -// TODO: test -func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - if state == nil { - log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) - return nil, nil - } - if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { - // If we're not creating a new resource, then don't run provisioners - log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) - return nil, nil - } - if state.Status == states.ObjectTainted { - // No point in provisioning an object that is already tainted, since - // it's going to get recreated on the next apply anyway. 
- log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) - return nil, nil - } - - provs := n.filterProvisioners() - if len(provs) == 0 { - // We have no provisioners, so don't do anything - return nil, nil - } - - if n.Error != nil && *n.Error != nil { - // We're already tainted, so just return out - return nil, nil - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - // If there are no errors, then we append it to our output error - // if we have one, otherwise we just output it. - err := n.apply(ctx, provs) - if err != nil { - *n.Error = multierror.Append(*n.Error, err) - if n.Error == nil { - return nil, err - } else { - log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr) - return nil, nil - } - } - - { - // Call post hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// filterProvisioners filters the provisioners on the resource to only -// the provisioners specified by the "when" option. -func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner { - // Fast path the zero case - if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil { - return nil - } - - if len(n.ResourceConfig.Managed.Provisioners) == 0 { - return nil - } - - result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners)) - for _, p := range n.ResourceConfig.Managed.Provisioners { - if p.When == n.When { - result = append(result, p) - } - } - - return result -} - -func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { - var diags tfdiags.Diagnostics - instanceAddr := n.Addr - absAddr := instanceAddr.Absolute(ctx.Path()) - - // If there's a connection block defined directly inside the resource block - // then it'll serve as a base connection configuration for all of the - // provisioners. - var baseConn hcl.Body - if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { - baseConn = n.ResourceConfig.Managed.Connection.Config - } - - for _, prov := range provs { - log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) - - // Get the provisioner - provisioner := ctx.Provisioner(prov.Type) - schema := ctx.ProvisionerSchema(prov.Type) - - var forEach map[string]cty.Value - - // We can't evaluate the for_each expression during a destroy - if n.When != configs.ProvisionerWhenDestroy { - m, forEachDiags := evaluateResourceForEachExpression(n.ResourceConfig.ForEach, ctx) - diags = diags.Append(forEachDiags) - forEach = m - } - - keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach) - - // Evaluate the main provisioner configuration. - config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) - diags = diags.Append(configDiags) - - // we can't apply the provisioner if the config has errors - if diags.HasErrors() { - return diags.Err() - } - - // If the provisioner block contains a connection block of its own then - // it can override the base connection configuration, if any. 
- var localConn hcl.Body - if prov.Connection != nil { - localConn = prov.Connection.Config - } - - var connBody hcl.Body - switch { - case baseConn != nil && localConn != nil: - // Our standard merging logic applies here, similar to what we do - // with _override.tf configuration files: arguments from the - // base connection block will be masked by any arguments of the - // same name in the local connection block. - connBody = configs.MergeBodies(baseConn, localConn) - case baseConn != nil: - connBody = baseConn - case localConn != nil: - connBody = localConn - } - - // start with an empty connInfo - connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) - - if connBody != nil { - var connInfoDiags tfdiags.Diagnostics - connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData) - diags = diags.Append(connInfoDiags) - if diags.HasErrors() { - // "on failure continue" setting only applies to failures of the - // provisioner itself, not to invalid configuration. - return diags.Err() - } - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstanceStep(absAddr, prov.Type) - }) - if err != nil { - return err - } - } - - // The output function - outputFn := func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(absAddr, prov.Type, msg) - return HookActionContinue, nil - }) - } - - output := CallbackUIOutput{OutputFn: outputFn} - resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: config, - Connection: connInfo, - UIOutput: &output, - }) - applyDiags := resp.Diagnostics.InConfigBody(prov.Config) - - // Call post hook - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err()) - }) - - switch prov.OnFailure { - case configs.ProvisionerOnFailureContinue: - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) - } else { - // Maybe there are warnings that we still want to see - diags = diags.Append(applyDiags) - } - default: - diags = diags.Append(applyDiags) - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) - return diags.Err() - } - } - - // Deal with the hook - if hookErr != nil { - return hookErr - } - } - - return diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go deleted file mode 100644 index 3272a8e8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go +++ /dev/null @@ -1,49 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/plans" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalPreventDestroy is an EvalNode implementation that returns an -// error if a resource has PreventDestroy configured and the diff -// would destroy the resource. 
-type EvalCheckPreventDestroy struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Change **plans.ResourceInstanceChange -} - -func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { - if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil { - return nil, nil - } - - change := *n.Change - preventDestroy := n.Config.Managed.PreventDestroy - - if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Instance cannot be destroyed", - Detail: fmt.Sprintf( - "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", - n.Addr.Absolute(ctx.Path()).String(), - ), - Subject: &n.Config.DeclRange, - }) - return nil, diags.Err() - } - - return nil, nil -} - -const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.` diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go deleted file mode 100644 index ec7a3ae0..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go +++ /dev/null @@ -1,144 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// EvalContext is the interface that is given to eval nodes to execute. -type EvalContext interface { - // Stopped returns a channel that is closed when evaluation is stopped - // via Terraform.Context.Stop() - Stopped() <-chan struct{} - - // Path is the current module path. - Path() addrs.ModuleInstance - - // Hook is used to call hook methods. The callback is called for each - // hook and should return the hook action to take and the error. - Hook(func(Hook) (HookAction, error)) error - - // Input is the UIInput object for interacting with the UI. - Input() UIInput - - // InitProvider initializes the provider with the given type and address, and - // returns the implementation of the resource provider or an error. - // - // It is an error to initialize the same provider more than once. - InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error) - - // Provider gets the provider instance with the given address (already - // initialized) or returns nil if the provider isn't initialized. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - // InitProvider must've been called on the EvalContext of the module - // that owns the given provider before calling this method. 
- Provider(addrs.AbsProviderConfig) providers.Interface - - // ProviderSchema retrieves the schema for a particular provider, which - // must have already been initialized with InitProvider. - // - // This method expects an _absolute_ provider configuration address, since - // resources in one module are able to use providers from other modules. - ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema - - // CloseProvider closes provider connections that aren't needed anymore. - CloseProvider(addrs.ProviderConfig) error - - // ConfigureProvider configures the provider with the given - // configuration. This is a separate context call because this call - // is used to store the provider configuration for inheritance lookups - // with ParentProviderConfig(). - ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics - - // ProviderInput and SetProviderInput are used to configure providers - // from user input. - ProviderInput(addrs.ProviderConfig) map[string]cty.Value - SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) - - // InitProvisioner initializes the provisioner with the given name and - // returns the implementation of the resource provisioner or an error. - // - // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (provisioners.Interface, error) - - // Provisioner gets the provisioner instance with the given name (already - // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) provisioners.Interface - - // ProvisionerSchema retrieves the main configuration schema for a - // particular provisioner, which must have already been initialized with - // InitProvisioner. - ProvisionerSchema(string) *configschema.Block - - // CloseProvisioner closes provisioner connections that aren't needed - // anymore. - CloseProvisioner(string) error - - // EvaluateBlock takes the given raw configuration block and associated - // schema and evaluates it to produce a value of an object type that - // conforms to the implied type of the schema. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. - // - // The "key" argument is also optional. If given, it is the instance key - // of the current object within the multi-instance container it belongs - // to. For example, on a resource block with "count" set this should be - // set to a different addrs.IntKey for each instance created from that - // block. Set this to addrs.NoKey if not appropriate. - // - // The returned body is an expanded version of the given body, with any - // "dynamic" blocks replaced with zero or more static blocks. This can be - // used to extract correct source location information about attributes of - // the returned object value. - EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) - - // EvaluateExpr takes the given HCL expression and evaluates it to produce - // a value. - // - // The "self" argument is optional. If given, it is the referenceable - // address that the name "self" should behave as an alias for when - // evaluating. Set this to nil if the "self" object should not be available. 
- EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) - - // EvaluationScope returns a scope that can be used to evaluate reference - // addresses in this context. - EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope - - // SetModuleCallArguments defines values for the variables of a particular - // child module call. - // - // Calling this function multiple times has merging behavior, keeping any - // previously-set keys that are not present in the new map. - SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) - - // GetVariableValue returns the value provided for the input variable with - // the given address, or cty.DynamicVal if the variable hasn't been assigned - // a value yet. - // - // Most callers should deal with variable values only indirectly via - // EvaluationScope and the other expression evaluation functions, but - // this is provided because variables tend to be evaluated outside of - // the context of the module they belong to and so we sometimes need to - // override the normal expression evaluation behavior. - GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value - - // Changes returns the writer object that can be used to write new proposed - // changes into the global changes set. - Changes() *plans.ChangesSync - - // State returns a wrapper object that provides safe concurrent access to - // the global state. - State() *states.SyncState -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go deleted file mode 100644 index cc00f9e5..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ /dev/null @@ -1,337 +0,0 @@ -package terraform - -import ( - "context" - "fmt" - "log" - "sync" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/version" - - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// BuiltinEvalContext is an EvalContext implementation that is used by -// Terraform by default. -type BuiltinEvalContext struct { - // StopContext is the context used to track whether we're complete - StopContext context.Context - - // PathValue is the Path that this context is operating within. - PathValue addrs.ModuleInstance - - // Evaluator is used for evaluating expressions within the scope of this - // eval context. - Evaluator *Evaluator - - // Schemas is a repository of all of the schemas we should need to - // decode configuration blocks and expressions. This must be constructed by - // the caller to include schemas for all of the providers, resource types, - // data sources and provisioners used by the given configuration and - // state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // VariableValues contains the variable values across all modules. This - // structure is shared across the entire containing context, and so it - // may be accessed only when holding VariableValuesLock. - // The keys of the first level of VariableValues are the string - // representations of addrs.ModuleInstance values. 
The second-level keys - // are variable names within each module instance. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - Components contextComponentFactory - Hooks []Hook - InputValue UIInput - ProviderCache map[string]providers.Interface - ProviderInputConfig map[string]map[string]cty.Value - ProviderLock *sync.Mutex - ProvisionerCache map[string]provisioners.Interface - ProvisionerLock *sync.Mutex - ChangesValue *plans.ChangesSync - StateValue *states.SyncState - - once sync.Once -} - -// BuiltinEvalContext implements EvalContext -var _ EvalContext = (*BuiltinEvalContext)(nil) - -func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { - // This can happen during tests. During tests, we just block forever. - if ctx.StopContext == nil { - return nil - } - - return ctx.StopContext.Done() -} - -func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - for _, h := range ctx.Hooks { - action, err := fn(h) - if err != nil { - return err - } - - switch action { - case HookActionContinue: - continue - case HookActionHalt: - // Return an early exit error to trigger an early exit - log.Printf("[WARN] Early exit triggered by hook: %T", h) - return EvalEarlyExitError{} - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) Input() UIInput { - return ctx.InputValue -} - -func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { - ctx.once.Do(ctx.init) - absAddr := addr.Absolute(ctx.Path()) - - // If we already initialized, it is an error - if p := ctx.Provider(absAddr); p != nil { - return nil, fmt.Errorf("%s is already initialized", addr) - } - - // Warning: make sure to acquire these locks AFTER the call to Provider - // above, since it also acquires locks. 
- ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := absAddr.String() - - p, err := ctx.Components.ResourceProvider(typeName, key) - if err != nil { - return nil, err - } - - log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) - ctx.ProviderCache[key] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - return ctx.ProviderCache[addr.String()] -} - -func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - ctx.once.Do(ctx.init) - - return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type.LegacyString()) -} - -func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { - ctx.once.Do(ctx.init) - - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - key := addr.Absolute(ctx.Path()).String() - provider := ctx.ProviderCache[key] - if provider != nil { - delete(ctx.ProviderCache, key) - return provider.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - absAddr := addr.Absolute(ctx.Path()) - p := ctx.Provider(absAddr) - if p == nil { - diags = diags.Append(fmt.Errorf("%s not initialized", addr)) - return diags - } - - providerSchema := ctx.ProviderSchema(absAddr) - if providerSchema == nil { - diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) - return diags - } - - req := providers.ConfigureRequest{ - TerraformVersion: version.String(), - Config: cfg, - } - - resp := p.Configure(req) - return resp.Diagnostics -} - -func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. - return nil - } - - return ctx.ProviderInputConfig[pc.String()] -} - -func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { - absProvider := pc.Absolute(ctx.Path()) - - if !ctx.Path().IsRoot() { - // Only root module provider configurations can have input. - log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") - return - } - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[absProvider.String()] = c - ctx.ProviderLock.Unlock() -} - -func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - ctx.once.Do(ctx.init) - - // If we already initialized, it is an error - if p := ctx.Provisioner(n); p != nil { - return nil, fmt.Errorf("Provisioner '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provisioner - // above, since it also acquires locks. 
- ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - p, err := ctx.Components.ResourceProvisioner(n, "") - if err != nil { - return nil, err - } - - ctx.ProvisionerCache[n] = p - - return p, nil -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - return ctx.ProvisionerCache[n] -} - -func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { - ctx.once.Do(ctx.init) - - return ctx.Schemas.ProvisionerConfig(n) -} - -func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { - ctx.once.Do(ctx.init) - - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - prov := ctx.ProvisionerCache[n] - if prov != nil { - return prov.Close() - } - - return nil -} - -func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - scope := ctx.EvaluationScope(self, keyData) - body, evalDiags := scope.ExpandBlock(body, schema) - diags = diags.Append(evalDiags) - val, evalDiags := scope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - return val, body, diags -} - -func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) - return scope.EvalExpr(expr, wantType) -} - -func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - data := &evaluationStateData{ - Evaluator: ctx.Evaluator, - ModulePath: ctx.PathValue, - InstanceKeyData: keyData, - Operation: ctx.Evaluator.Operation, - } - return ctx.Evaluator.Scope(data, self) -} - -func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { - return ctx.PathValue -} - -func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - childPath := n.ModuleInstance(ctx.PathValue) - key := childPath.String() - - args := ctx.VariableValues[key] - if args == nil { - args = make(map[string]cty.Value) - ctx.VariableValues[key] = vals - return - } - - for k, v := range vals { - args[k] = v - } -} - -func (ctx *BuiltinEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { - ctx.VariableValuesLock.Lock() - defer ctx.VariableValuesLock.Unlock() - - modKey := addr.Module.String() - modVars := ctx.VariableValues[modKey] - val, ok := modVars[addr.Variable.Name] - if !ok { - return cty.DynamicVal - } - return val -} - -func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { - return ctx.ChangesValue -} - -func (ctx *BuiltinEvalContext) State() *states.SyncState { - return ctx.StateValue -} - -func (ctx *BuiltinEvalContext) init() { -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go deleted file mode 100644 index 0985e9e8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go +++ /dev/null @@ -1,329 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hcldec" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang" - 
"github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// MockEvalContext is a mock version of EvalContext that can be used -// for tests. -type MockEvalContext struct { - StoppedCalled bool - StoppedValue <-chan struct{} - - HookCalled bool - HookHook Hook - HookError error - - InputCalled bool - InputInput UIInput - - InitProviderCalled bool - InitProviderType string - InitProviderAddr addrs.ProviderConfig - InitProviderProvider providers.Interface - InitProviderError error - - ProviderCalled bool - ProviderAddr addrs.AbsProviderConfig - ProviderProvider providers.Interface - - ProviderSchemaCalled bool - ProviderSchemaAddr addrs.AbsProviderConfig - ProviderSchemaSchema *ProviderSchema - - CloseProviderCalled bool - CloseProviderAddr addrs.ProviderConfig - CloseProviderProvider providers.Interface - - ProviderInputCalled bool - ProviderInputAddr addrs.ProviderConfig - ProviderInputValues map[string]cty.Value - - SetProviderInputCalled bool - SetProviderInputAddr addrs.ProviderConfig - SetProviderInputValues map[string]cty.Value - - ConfigureProviderCalled bool - ConfigureProviderAddr addrs.ProviderConfig - ConfigureProviderConfig cty.Value - ConfigureProviderDiags tfdiags.Diagnostics - - InitProvisionerCalled bool - InitProvisionerName string - InitProvisionerProvisioner provisioners.Interface - InitProvisionerError error - - ProvisionerCalled bool - ProvisionerName string - ProvisionerProvisioner provisioners.Interface - - ProvisionerSchemaCalled bool - ProvisionerSchemaName string - ProvisionerSchemaSchema *configschema.Block - - CloseProvisionerCalled bool - CloseProvisionerName string - CloseProvisionerProvisioner provisioners.Interface - - EvaluateBlockCalled bool - EvaluateBlockBody hcl.Body - EvaluateBlockSchema *configschema.Block - EvaluateBlockSelf addrs.Referenceable - EvaluateBlockKeyData InstanceKeyEvalData - EvaluateBlockResultFunc func( - body hcl.Body, - schema *configschema.Block, - self addrs.Referenceable, - keyData InstanceKeyEvalData, - ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateBlockResult cty.Value - EvaluateBlockExpandedBody hcl.Body - EvaluateBlockDiags tfdiags.Diagnostics - - EvaluateExprCalled bool - EvaluateExprExpr hcl.Expression - EvaluateExprWantType cty.Type - EvaluateExprSelf addrs.Referenceable - EvaluateExprResultFunc func( - expr hcl.Expression, - wantType cty.Type, - self addrs.Referenceable, - ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set - EvaluateExprResult cty.Value - EvaluateExprDiags tfdiags.Diagnostics - - EvaluationScopeCalled bool - EvaluationScopeSelf addrs.Referenceable - EvaluationScopeKeyData InstanceKeyEvalData - EvaluationScopeScope *lang.Scope - - PathCalled bool - PathPath addrs.ModuleInstance - - SetModuleCallArgumentsCalled bool - SetModuleCallArgumentsModule addrs.ModuleCallInstance - SetModuleCallArgumentsValues map[string]cty.Value - - GetVariableValueCalled bool - GetVariableValueAddr addrs.AbsInputVariableInstance - GetVariableValueValue cty.Value - - ChangesCalled bool - ChangesChanges *plans.ChangesSync - - StateCalled bool - StateState *states.SyncState -} - -// MockEvalContext implements EvalContext -var _ EvalContext = (*MockEvalContext)(nil) - -func (c *MockEvalContext) Stopped() <-chan struct{} 
{ - c.StoppedCalled = true - return c.StoppedValue -} - -func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { - c.HookCalled = true - if c.HookHook != nil { - if _, err := fn(c.HookHook); err != nil { - return err - } - } - - return c.HookError -} - -func (c *MockEvalContext) Input() UIInput { - c.InputCalled = true - return c.InputInput -} - -func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { - c.InitProviderCalled = true - c.InitProviderType = t - c.InitProviderAddr = addr - return c.InitProviderProvider, c.InitProviderError -} - -func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { - c.ProviderCalled = true - c.ProviderAddr = addr - return c.ProviderProvider -} - -func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { - c.ProviderSchemaCalled = true - c.ProviderSchemaAddr = addr - return c.ProviderSchemaSchema -} - -func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { - c.CloseProviderCalled = true - c.CloseProviderAddr = addr - return nil -} - -func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { - c.ConfigureProviderCalled = true - c.ConfigureProviderAddr = addr - c.ConfigureProviderConfig = cfg - return c.ConfigureProviderDiags -} - -func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { - c.ProviderInputCalled = true - c.ProviderInputAddr = addr - return c.ProviderInputValues -} - -func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { - c.SetProviderInputCalled = true - c.SetProviderInputAddr = addr - c.SetProviderInputValues = vals -} - -func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { - c.InitProvisionerCalled = true - c.InitProvisionerName = n - return c.InitProvisionerProvisioner, c.InitProvisionerError -} - -func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { - c.ProvisionerCalled = true - c.ProvisionerName = n - return c.ProvisionerProvisioner -} - -func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { - c.ProvisionerSchemaCalled = true - c.ProvisionerSchemaName = n - return c.ProvisionerSchemaSchema -} - -func (c *MockEvalContext) CloseProvisioner(n string) error { - c.CloseProvisionerCalled = true - c.CloseProvisionerName = n - return nil -} - -func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - c.EvaluateBlockCalled = true - c.EvaluateBlockBody = body - c.EvaluateBlockSchema = schema - c.EvaluateBlockSelf = self - c.EvaluateBlockKeyData = keyData - if c.EvaluateBlockResultFunc != nil { - return c.EvaluateBlockResultFunc(body, schema, self, keyData) - } - return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags -} - -func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - c.EvaluateExprCalled = true - c.EvaluateExprExpr = expr - c.EvaluateExprWantType = wantType - c.EvaluateExprSelf = self - if c.EvaluateExprResultFunc != nil { - return c.EvaluateExprResultFunc(expr, wantType, self) - } - return c.EvaluateExprResult, c.EvaluateExprDiags -} - -// installSimpleEval is a helper to install a simple mock implementation of -// both EvaluateBlock and EvaluateExpr into 
the receiver. -// -// These default implementations will either evaluate the given input against -// the scope in field EvaluationScopeScope or, if it is nil, with no eval -// context at all so that only constant values may be used. -// -// This function overwrites any existing functions installed in fields -// EvaluateBlockResultFunc and EvaluateExprResultFunc. -func (c *MockEvalContext) installSimpleEval() { - c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - var diags tfdiags.Diagnostics - body, diags = scope.ExpandBlock(body, schema) - if diags.HasErrors() { - return cty.DynamicVal, body, diags - } - val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return cty.DynamicVal, body, diags - } - return val, body, diags - } - - // Fallback codepath supporting constant values only. - val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) - return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) - } - c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { - if scope := c.EvaluationScopeScope; scope != nil { - // Fully-functional codepath. - return scope.EvalExpr(expr, wantType) - } - - // Fallback codepath supporting constant values only. - var diags tfdiags.Diagnostics - val, hclDiags := expr.Value(nil) - diags = diags.Append(hclDiags) - if hclDiags.HasErrors() { - return cty.DynamicVal, diags - } - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - diags = diags.Append(err) - return cty.DynamicVal, diags - } - return val, diags - } -} - -func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { - c.EvaluationScopeCalled = true - c.EvaluationScopeSelf = self - c.EvaluationScopeKeyData = keyData - return c.EvaluationScopeScope -} - -func (c *MockEvalContext) Path() addrs.ModuleInstance { - c.PathCalled = true - return c.PathPath -} - -func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { - c.SetModuleCallArgumentsCalled = true - c.SetModuleCallArgumentsModule = n - c.SetModuleCallArgumentsValues = values -} - -func (c *MockEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { - c.GetVariableValueCalled = true - c.GetVariableValueAddr = addr - return c.GetVariableValueValue -} - -func (c *MockEvalContext) Changes() *plans.ChangesSync { - c.ChangesCalled = true - return c.ChangesChanges -} - -func (c *MockEvalContext) State() *states.SyncState { - c.StateCalled = true - return c.StateState -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go deleted file mode 100644 index f3b07ef0..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go +++ /dev/null @@ -1,120 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// evaluateResourceCountExpression is our standard mechanism for interpreting an -// expression given for a "count" argument on a resource. 
This should be called -// from the DynamicExpand of a node representing a resource in order to -// determine the final count value. -// -// If the result is zero or positive and no error diagnostics are returned, then -// the result is the literal count value to use. -// -// If the result is -1, this indicates that the given expression is nil and so -// the "count" behavior should not be enabled for this resource at all. -// -// If error diagnostics are returned then the result is always the meaningless -// placeholder value -1. -func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { - count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx) - if !known { - // Currently this is a rather bad outcome from a UX standpoint, since we have - // no real mechanism to deal with this situation and all we can do is produce - // an error message. - // FIXME: In future, implement a built-in mechanism for deferring changes that - // can't yet be predicted, and use it to guide the user through several - // plan/apply steps until the desired configuration is eventually reached. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`, - Subject: expr.Range().Ptr(), - }) - } - return count, diags -} - -// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression -// except that it handles an unknown result by returning count = 0 and -// a known = false, rather than by reporting the unknown value as an error -// diagnostic. -func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) { - if expr == nil { - return -1, true, nil - } - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return -1, true, diags - } - - switch { - case countVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - case !countVal.IsKnown(): - return 0, false, diags - } - - err := gocty.FromCtyValue(countVal, &count) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - } - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, - Subject: expr.Range().Ptr(), - }) - return -1, true, diags - } - - return count, true, diags -} - -// fixResourceCountSetTransition is a helper function to fix up the state when a -// resource transitions its "count" from being set to unset or vice-versa, -// treating a 0-key and a no-key instance as aliases for one another across -// the transition. 
-// -// The correct time to call this function is in the DynamicExpand method for -// a node representing a resource, just after evaluating the count with -// evaluateResourceCountExpression, and before any other analysis of the -// state such as orphan detection. -// -// This function calls methods on the given EvalContext to update the current -// state in-place, if necessary. It is a no-op if there is no count transition -// taking place. -// -// Since the state is modified in-place, this function must take a writer lock -// on the state. The caller must therefore not also be holding a state lock, -// or this function will block forever awaiting the lock. -func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { - state := ctx.State() - changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) - if changed { - log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go deleted file mode 100644 index 647c58d1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go +++ /dev/null @@ -1,77 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state -// when there is a resource count with zero/one boundary, i.e. fixing -// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. -// -// This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct { - Config *configs.Config -} - -// TODO: test -func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // We'll temporarily lock the state to grab the modules, then work on each - // one separately while taking a lock again for each separate resource. - // This means that if another caller concurrently adds a module here while - // we're working then we won't update it, but that's no worse than the - // concurrent writer blocking for our entire fixup process and _then_ - // adding a new module, and in practice the graph node associated with - // this eval depends on everything else in the graph anyway, so there - // should not be concurrent writers. - state := ctx.State().Lock() - moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) - for _, m := range state.Modules { - moduleAddrs = append(moduleAddrs, m.Addr) - } - ctx.State().Unlock() - - for _, addr := range moduleAddrs { - cfg := n.Config.DescendentForInstance(addr) - if cfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - if err := n.fixModule(ctx, addr); err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { - ms := ctx.State().Module(moduleAddr) - cfg := n.Config.DescendentForInstance(moduleAddr) - if ms == nil { - // Theoretically possible for a concurrent writer to delete a module - // while we're running, but in practice the graph node that called us - // depends on everything else in the graph and so there can never - // be a concurrent writer. 
- return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) - } - if cfg == nil { - return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) - } - - for _, r := range ms.Resources { - addr := r.Addr.Absolute(moduleAddr) - rCfg := cfg.Module.ResourceByAddr(r.Addr) - if rCfg == nil { - log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) - continue - } - hasCount := rCfg.Count != nil - fixResourceCountSetTransition(ctx, addr, hasCount) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go deleted file mode 100644 index cc9046c2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go +++ /dev/null @@ -1,803 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalCheckPlannedChange is an EvalNode implementation that produces errors -// if the _actual_ expected value is not compatible with what was recorded -// in the plan. -// -// Errors here are most often indicative of a bug in the provider, so our -// error messages will report with that in mind. It's also possible that -// there's a bug in Terraform's Core's own "proposed new value" code in -// EvalDiff. -type EvalCheckPlannedChange struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - - // We take ResourceInstanceChange objects here just because that's what's - // convenient to pass in from the evaltree implementation, but we really - // only look at the "After" value of each change. - Planned, Actual **plans.ResourceInstanceChange -} - -func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - plannedChange := *n.Planned - actualChange := *n.Actual - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) - } - - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - - log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) - - if plannedChange.Action != actualChange.Action { - switch { - case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: - // It's okay for an update to become a NoOp once we've filled in - // all of the unknown values, since the final values might actually - // match what was there before after all. 
- log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, - plannedChange.Action, actualChange.Action, - ), - )) - } - } - - errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), - ), - )) - } - return nil, diags.Err() -} - -// EvalDiff is an EvalNode implementation that detects changes for a given -// resource instance. -type EvalDiff struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - PreviousDiff **plans.ResourceInstanceChange - - // CreateBeforeDestroy is set if either the resource's own config sets - // create_before_destroy explicitly or if dependencies have forced the - // resource to be handled as create_before_destroy in order to avoid - // a dependency cycle. 
- CreateBeforeDestroy bool - - OutputChange **plans.ResourceInstanceChange - OutputValue *cty.Value - OutputState **states.ResourceInstanceObject - - Stub bool -} - -// TODO: test -func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - config := *n.Config - provider := *n.Provider - providerSchema := *n.ProviderSchema - - if providerSchema == nil { - return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) - } - if n.ProviderAddr.ProviderConfig.Type.LegacyString() == "" { - panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) - } - - var diags tfdiags.Diagnostics - - // Evaluate the configuration - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - absAddr := n.Addr.Absolute(ctx.Path()) - var priorVal cty.Value - var priorValTainted cty.Value - var priorPrivate []byte - if state != nil { - if state.Status != states.ObjectTainted { - priorVal = state.Value - priorPrivate = state.Private - } else { - // If the prior state is tainted then we'll proceed below like - // we're creating an entirely new object, but then turn it into - // a synthetic "Replace" change at the end, creating the same - // result as if the provider had marked at least one argument - // change as "requires replacement". - priorValTainted = state.Value - priorVal = cty.NullVal(schema.ImpliedType()) - } - } else { - priorVal = cty.NullVal(schema.ImpliedType()) - } - - proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) - - // Call pre-diff hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - } - - log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) - // Allow the provider to validate the final set of values. - // The config was statically validated early on, but there may have been - // unknown values which the provider could not validate at the time. - validateResp := provider.ValidateResourceTypeConfig( - providers.ValidateResourceTypeConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() - } - - // The provider gets an opportunity to customize the proposed new value, - // which in turn produces the _planned_ new value. 
But before - // we send back this information, we need to process ignore_changes - // so that CustomizeDiff will not act on them - var ignoreChangeDiags tfdiags.Diagnostics - proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return nil, diags.Err() - } - - resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: priorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: priorPrivate, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - plannedNewVal := resp.PlannedState - plannedPrivate := resp.PlannedPrivate - - if plannedNewVal == cty.NilVal { - // Should never happen. Since real-world providers return via RPC a nil - // is always a bug in the client-side stub. This is more likely caused - // by an incompletely-configured mock provider in tests, though. - panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) - } - - // We allow the planned new value to disagree with configuration _values_ - // here, since that allows the provider to do special logic like a - // DiffSuppressFunc, but we still require that the provider produces - // a value whose type conforms to the schema. - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. 
- var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - return nil, diags.Err() - } - } - - // TODO: We should be able to remove this repeat of processing ignored changes - // after the plan, which helps providers relying on old behavior "just work" - // in the next major version, such that we can be stricter about ignore_changes - // values - plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return nil, diags.Err() - } - - // The provider produces a list of paths to attributes whose changes mean - // that we must replace rather than update an existing remote object. - // However, we only need to do that if the identified attributes _have_ - // actually changed -- particularly after we may have undone some of the - // changes in processIgnoreChanges -- so now we'll filter that list to - // include only where changes are detected. - reqRep := cty.NewPathSet() - if len(resp.RequiresReplace) > 0 { - for _, path := range resp.RequiresReplace { - if priorVal.IsNull() { - // If prior is null then we don't expect any RequiresReplace at all, - // because this is a Create action. - continue - } - - priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil) - plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) - if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { - // This means the path was invalid in both the prior and new - // values, which is an error with the provider itself. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, path, - ), - )) - continue - } - - // Make sure we have valid Values for both values. - // Note: if the opposing value was of the type - // cty.DynamicPseudoType, the type assigned here may not exactly - // match the schema. This is fine here, since we're only going to - // check for equality, but if the NullVal is to be used, we need to - // check the schema for th true type. 
- switch { - case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: - // this should never happen without ApplyPath errors above - panic("requires replace path returned 2 nil values") - case priorChangedVal == cty.NilVal: - priorChangedVal = cty.NullVal(plannedChangedVal.Type()) - case plannedChangedVal == cty.NilVal: - plannedChangedVal = cty.NullVal(priorChangedVal.Type()) - } - - eqV := plannedChangedVal.Equals(priorChangedVal) - if !eqV.IsKnown() || eqV.False() { - reqRep.Add(path) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - eqV := plannedNewVal.Equals(priorVal) - eq := eqV.IsKnown() && eqV.True() - - var action plans.Action - switch { - case priorVal.IsNull(): - action = plans.Create - case eq: - action = plans.NoOp - case !reqRep.Empty(): - // If there are any "requires replace" paths left _after our filtering - // above_ then this is a replace action. - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - default: - action = plans.Update - // "Delete" is never chosen here, because deletion plans are always - // created more directly elsewhere, such as in "orphan" handling. - } - - if action.IsReplace() { - // In this strange situation we want to produce a change object that - // shows our real prior object but has a _new_ object that is built - // from a null prior object, since we're going to delete the one - // that has all the computed values on it. - // - // Therefore we'll ask the provider to plan again here, giving it - // a null object for the prior, and then we'll meld that with the - // _actual_ prior state to produce a correctly-shaped replace change. - // The resulting change should show any computed attributes changing - // from known prior values to unknown values, unless the provider is - // able to predict new values for any of these computed attributes. - nullPriorVal := cty.NullVal(schema.ImpliedType()) - - // create a new proposed value from the null state and the config - proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) - - resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - PriorState: nullPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: plannedPrivate, - }) - // We need to tread carefully here, since if there are any warnings - // in here they probably also came out of our previous call to - // PlanResourceChange above, and so we don't want to repeat them. - // Consequently, we break from the usual pattern here and only - // append these new diagnostics if there's at least one error inside. - if resp.Diagnostics.HasErrors() { - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - return nil, diags.Err() - } - plannedNewVal = resp.PlannedState - plannedPrivate = resp.PlannedPrivate - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // If our prior value was tainted then we actually want this to appear - // as a replace change, even though so far we've been treating it as a - // create. 
- if action == plans.Create && priorValTainted != cty.NilVal { - if n.CreateBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - priorVal = priorValTainted - } - - // As a special case, if we have a previous diff (presumably from the plan - // phases, whereas we're now in the apply phase) and it was for a replace, - // we've already deleted the original object from state by the time we - // get here and so we would've ended up with a _create_ action this time, - // which we now need to paper over to get a result consistent with what - // we originally intended. - if n.PreviousDiff != nil { - prevChange := *n.PreviousDiff - if prevChange.Action.IsReplace() && action == plans.Create { - log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) - action = prevChange.Action - priorVal = prevChange.Before - } - } - - // Call post-refresh hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) - }) - if err != nil { - return nil, err - } - } - - // Update our output if we care - if n.OutputChange != nil { - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - Private: plannedPrivate, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: action, - Before: priorVal, - After: plannedNewVal, - }, - RequiredReplace: reqRep, - } - } - - if n.OutputValue != nil { - *n.OutputValue = configVal - } - - // Update the state if we care - if n.OutputState != nil { - *n.OutputState = &states.ResourceInstanceObject{ - // We use the special "planned" status here to note that this - // object's value is not yet complete. Objects with this status - // cannot be used during expression evaluation, so the caller - // must _also_ record the returned change in the active plan, - // which the expression evaluator will use in preference to this - // incomplete value recorded in the state. - Status: states.ObjectPlanned, - Value: plannedNewVal, - Private: plannedPrivate, - } - } - - return nil, nil -} - -func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { - // ignore_changes only applies when an object already exists, since we - // can't ignore changes to a thing we've not created yet. - if prior.IsNull() { - return proposed, nil - } - - ignoreChanges := n.Config.Managed.IgnoreChanges - ignoreAll := n.Config.Managed.IgnoreAllChanges - - if len(ignoreChanges) == 0 && !ignoreAll { - return proposed, nil - } - if ignoreAll { - return prior, nil - } - if prior.IsNull() || proposed.IsNull() { - // Ignore changes doesn't apply when we're creating for the first time. - // Proposed should never be null here, but if it is then we'll just let it be. - return proposed, nil - } - - return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) -} - -func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { - // When we walk below we will be using cty.Path values for comparison, so - // we'll convert our traversals here so we can compare more easily. 
- ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) - for i, traversal := range ignoreChanges { - path := make(cty.Path, len(traversal)) - for si, step := range traversal { - switch ts := step.(type) { - case hcl.TraverseRoot: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseAttr: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseIndex: - path[si] = cty.IndexStep{ - Key: ts.Key, - } - default: - panic(fmt.Sprintf("unsupported traversal step %#v", step)) - } - } - ignoreChangesPath[i] = path - } - - var diags tfdiags.Diagnostics - ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { - // First we must see if this is a path that's being ignored at all. - // We're looking for an exact match here because this walk will visit - // leaf values first and then their containers, and we want to do - // the "ignore" transform once we reach the point indicated, throwing - // away any deeper values we already produced at that point. - var ignoreTraversal hcl.Traversal - for i, candidate := range ignoreChangesPath { - if path.Equals(candidate) { - ignoreTraversal = ignoreChanges[i] - } - } - if ignoreTraversal == nil { - return v, nil - } - - // If we're able to follow the same path through the prior value, - // we'll take the value there instead, effectively undoing the - // change that was planned. - priorV, diags := hcl.ApplyPath(prior, path, nil) - if diags.HasErrors() { - // We just ignore the errors and move on here, since we assume it's - // just because the prior value was a slightly-different shape. - // It could potentially also be that the traversal doesn't match - // the schema, but we should've caught that during the validate - // walk if so. - return v, nil - } - return priorV, nil - }) - return ret, diags -} - -// a group of key-*ResourceAttrDiff pairs from the same flatmapped container -type flatAttrDiff map[string]*ResourceAttrDiff - -// we need to keep all keys if any of them have a diff that's not ignored -func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool { - for k, v := range f { - ignore := false - for attr := range ignoreChanges { - if strings.HasPrefix(k, attr) { - ignore = true - } - } - - if !v.Empty() && !v.NewComputed && !ignore { - return true - } - } - return false -} - -// EvalDiffDestroy is an EvalNode implementation that returns a plain -// destroy diff. -type EvalDiffDestroy struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - State **states.ResourceInstanceObject - ProviderAddr addrs.AbsProviderConfig - - Output **plans.ResourceInstanceChange - OutputState **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - - if n.ProviderAddr.ProviderConfig.Type.LegacyString() == "" { - if n.DeposedKey == "" { - panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) - } else { - panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) - } - } - - // If there is no state or our attributes object is null then we're already - // destroyed. 
- if state == nil || state.Value.IsNull() { - return nil, nil - } - - // Call pre-diff hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff( - absAddr, n.DeposedKey.Generation(), - state.Value, - cty.NullVal(cty.DynamicPseudoType), - ) - }) - if err != nil { - return nil, err - } - - // Change is always the same for a destroy. We don't need the provider's - // help for this one. - // TODO: Should we give the provider an opportunity to veto this? - change := &plans.ResourceInstanceChange{ - Addr: absAddr, - DeposedKey: n.DeposedKey, - Change: plans.Change{ - Action: plans.Delete, - Before: state.Value, - After: cty.NullVal(cty.DynamicPseudoType), - }, - Private: state.Private, - ProviderAddr: n.ProviderAddr, - } - - // Call post-diff hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff( - absAddr, - n.DeposedKey.Generation(), - change.Action, - change.Before, - change.After, - ) - }) - if err != nil { - return nil, err - } - - // Update our output - *n.Output = change - - if n.OutputState != nil { - // Record our proposed new state, which is nil because we're destroying. - *n.OutputState = nil - } - - return nil, nil -} - -// EvalReduceDiff is an EvalNode implementation that takes a planned resource -// instance change as might be produced by EvalDiff or EvalDiffDestroy and -// "simplifies" it to a single atomic action to be performed by a specific -// graph node. -// -// Callers must specify whether they are a destroy node or a regular apply -// node. If the result is NoOp then the given change requires no action for -// the specific graph node calling this and so evaluation of the that graph -// node should exit early and take no action. -// -// The object written to OutChange may either be identical to InChange or -// a new change object derived from InChange. Because of the former case, the -// caller must not mutate the object returned in OutChange. -type EvalReduceDiff struct { - Addr addrs.ResourceInstance - InChange **plans.ResourceInstanceChange - Destroy bool - OutChange **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) { - in := *n.InChange - out := in.Simplify(n.Destroy) - if n.OutChange != nil { - *n.OutChange = out - } - if out.Action != in.Action { - if n.Destroy { - log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action) - } else { - log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action) - } - } - return nil, nil -} - -// EvalReadDiff is an EvalNode implementation that retrieves the planned -// change for a particular resource instance object. 
-type EvalReadDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - csrc := changes.GetResourceInstanceChange(addr, gen) - if csrc == nil { - log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) - return nil, nil - } - - change, err := csrc.Decode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) - } - if n.Change != nil { - *n.Change = change - } - - log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) - - return nil, nil -} - -// EvalWriteDiff is an EvalNode implementation that saves a planned change -// for an instance object into the set of global planned changes. -type EvalWriteDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - if n.Change == nil || *n.Change == nil { - // Caller sets nil to indicate that we need to remove a change from - // the set of changes. - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - changes.RemoveResourceInstanceChange(addr, gen) - return nil, nil - } - - providerSchema := *n.ProviderSchema - change := *n.Change - - if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { - // Should never happen, and indicates a bug in the caller. - panic("inconsistent address and/or deposed key in EvalWriteDiff") - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - csrc, err := change.Encode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) - } - - changes.AppendResourceInstanceChange(csrc) - if n.DeposedKey == states.NotDeposed { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) - } else { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go deleted file mode 100644 index 470f798b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// EvalReturnError is an EvalNode implementation that returns an -// error if it is present. 
-// -// This is useful for scenarios where an error has been captured by -// another EvalNode (like EvalApply) for special EvalTree-based error -// handling, and that handling has completed, so the error should be -// returned normally. -type EvalReturnError struct { - Error *error -} - -func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) { - if n.Error == nil { - return nil, nil - } - - return nil, *n.Error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go deleted file mode 100644 index 711c625c..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -// EvalNodeFilterFunc is the callback used to replace a node with -// another to node. To not do the replacement, just return the input node. -type EvalNodeFilterFunc func(EvalNode) EvalNode - -// EvalNodeFilterable is an interface that can be implemented by -// EvalNodes to allow filtering of sub-elements. Note that this isn't -// a common thing to implement and you probably don't need it. -type EvalNodeFilterable interface { - EvalNode - Filter(EvalNodeFilterFunc) -} - -// EvalFilter runs the filter on the given node and returns the -// final filtered value. This should be called rather than checking -// the EvalNode directly since this will properly handle EvalNodeFilterables. -func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode { - if f, ok := node.(EvalNodeFilterable); ok { - f.Filter(fn) - return node - } - - return fn(node) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go deleted file mode 100644 index 1a55f024..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go +++ /dev/null @@ -1,49 +0,0 @@ -package terraform - -// EvalNodeOpFilterable is an interface that EvalNodes can implement -// to be filterable by the operation that is being run on Terraform. -type EvalNodeOpFilterable interface { - IncludeInOp(walkOperation) bool -} - -// EvalNodeFilterOp returns a filter function that filters nodes that -// include themselves in specific operations. -func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc { - return func(n EvalNode) EvalNode { - include := true - if of, ok := n.(EvalNodeOpFilterable); ok { - include = of.IncludeInOp(op) - } - if include { - return n - } - - return EvalNoop{} - } -} - -// EvalOpFilter is an EvalNode implementation that is a proxy to -// another node but filters based on the operation. -type EvalOpFilter struct { - // Ops is the list of operations to include this node in. - Ops []walkOperation - - // Node is the node to execute - Node EvalNode -} - -// TODO: test -func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) { - return EvalRaw(n.Node, ctx) -} - -// EvalNodeOpFilterable impl. 
-func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool { - for _, v := range n.Ops { - if v == op { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go deleted file mode 100644 index 59999572..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go +++ /dev/null @@ -1,111 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// evaluateResourceForEachExpression interprets a "for_each" argument on a resource. -// -// Returns a cty.Value map, and diagnostics if necessary. It will return nil if -// the expression is nil, and is used to distinguish between an unset for_each and an -// empty map -func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { - forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx) - if !known { - // Attach a diag as we do with count, with the same downsides - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`, - Subject: expr.Range().Ptr(), - }) - } - return forEachMap, diags -} - -// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression -// except that it handles an unknown result by returning an empty map and -// a known = false, rather than by reporting the unknown value as an error -// diagnostic. -func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) { - if expr == nil { - return nil, true, nil - } - - forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - diags = diags.Append(forEachDiags) - if diags.HasErrors() { - return nil, true, diags - } - - switch { - case forEachVal.IsNull(): - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. 
A map, or set of strings is allowed.`, - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - case !forEachVal.IsKnown(): - return map[string]cty.Value{}, false, diags - } - - if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() || forEachVal.Type().IsTupleType() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - - // If the map is empty ({}), return an empty map, because cty will return nil when representing {} AsValueMap - // This also covers an empty set (toset([])) - if forEachVal.LengthInt() == 0 { - return map[string]cty.Value{}, true, diags - } - - if forEachVal.Type().IsSetType() { - if forEachVal.Type().ElementType() != cty.String { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - - // A set may contain unknown values that must be - // discovered by checking with IsWhollyKnown (which iterates through the - // structure), while for maps in cty, keys can never be unknown or null, - // thus the earlier IsKnown check suffices for maps - if !forEachVal.IsWhollyKnown() { - return map[string]cty.Value{}, false, diags - } - - // A set of strings may contain null, which makes it impossible to - // convert to a map, so we must return an error - it := forEachVal.ElementIterator() - for it.Next() { - item, _ := it.Element() - if item.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" sets must not contain null values.`), - Subject: expr.Range().Ptr(), - }) - return nil, true, diags - } - } - } - - return forEachVal.AsValueMap(), true, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go deleted file mode 100644 index d6b46a1f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go +++ /dev/null @@ -1,26 +0,0 @@ -package terraform - -// EvalIf is an EvalNode that is a conditional. 
-type EvalIf struct { - If func(EvalContext) (bool, error) - Then EvalNode - Else EvalNode -} - -// TODO: test -func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { - yes, err := n.If(ctx) - if err != nil { - return nil, err - } - - if yes { - return EvalRaw(n.Then, ctx) - } else { - if n.Else != nil { - return EvalRaw(n.Else, ctx) - } - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go deleted file mode 100644 index a60f4a0a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalImportState is an EvalNode implementation that performs an -// ImportState operation on a provider. This will return the imported -// states but won't modify any actual state. -type EvalImportState struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ID string - Output *[]providers.ImportedResource -} - -// TODO: test -func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - provider := *n.Provider - var diags tfdiags.Diagnostics - - { - // Call pre-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(absAddr, n.ID) - }) - if err != nil { - return nil, err - } - } - - resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ - TypeName: n.Addr.Resource.Type, - ID: n.ID, - }) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - imported := resp.ImportedResources - - for _, obj := range imported { - log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) - } - - if n.Output != nil { - *n.Output = imported - } - - { - // Call post-import hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(absAddr, imported) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalImportStateVerify verifies the state after ImportState and -// after the refresh to make sure it is non-nil and valid. -type EvalImportStateVerify struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - state := *n.State - if state.Value.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. 
Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", - n.Addr.String(), - ), - )) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go deleted file mode 100644 index d3a4f5b4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go +++ /dev/null @@ -1,61 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// EvalConfigBlock is an EvalNode implementation that takes a raw -// configuration block and evaluates any expressions within it. -// -// ExpandedConfig is populated with the result of expanding any "dynamic" -// blocks in the given body, which can be useful for extracting correct source -// location information for specific attributes in the result. -type EvalConfigBlock struct { - Config *hcl.Body - Schema *configschema.Block - SelfAddr addrs.Referenceable - Output *cty.Value - ExpandedConfig *hcl.Body - ContinueOnErr bool -} - -func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { - val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) - if diags.HasErrors() && n.ContinueOnErr { - log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) - return nil, EvalEarlyExitError{} - } - - if n.Output != nil { - *n.Output = val - } - if n.ExpandedConfig != nil { - *n.ExpandedConfig = body - } - - return nil, diags.ErrWithWarnings() -} - -// EvalConfigExpr is an EvalNode implementation that takes a raw configuration -// expression and evaluates it. -type EvalConfigExpr struct { - Expr hcl.Expression - SelfAddr addrs.Referenceable - Output *cty.Value -} - -func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) - - if n.Output != nil { - *n.Output = val - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go deleted file mode 100644 index f30286e2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalLocal is an EvalNode implementation that evaluates the -// expression for a local value and writes it into a transient part of -// the state. -type EvalLocal struct { - Addr addrs.LocalValue - Expr hcl.Expression -} - -func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - // We ignore diags here because any problems we might find will be found - // again in EvaluateExpr below. 
- refs, _ := lang.ReferencesInExpr(n.Expr) - for _, ref := range refs { - if ref.Subject == n.Addr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referencing local value", - Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), - Subject: ref.SourceRange.ToHCL().Ptr(), - Context: n.Expr.Range().Ptr(), - }) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - - val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - return nil, diags.Err() - } - - state := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write local value to nil state") - } - - state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) - - return nil, nil -} - -// EvalDeleteLocal is an EvalNode implementation that deletes a Local value -// from the state. Locals aren't persisted, but we don't need to evaluate them -// during destroy. -type EvalDeleteLocal struct { - Addr addrs.LocalValue -} - -func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go deleted file mode 100644 index f4bc8225..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go +++ /dev/null @@ -1,8 +0,0 @@ -package terraform - -// EvalNoop is an EvalNode that does nothing. -type EvalNoop struct{} - -func (EvalNoop) Eval(EvalContext) (interface{}, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go deleted file mode 100644 index 181e5563..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ /dev/null @@ -1,135 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" -) - -// EvalDeleteOutput is an EvalNode implementation that deletes an output -// from the state. -type EvalDeleteOutput struct { - Addr addrs.OutputValue -} - -// TODO: test -func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state := ctx.State() - if state == nil { - return nil, nil - } - - state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) - return nil, nil -} - -// EvalWriteOutput is an EvalNode implementation that writes the output -// for the given name to the current state. -type EvalWriteOutput struct { - Addr addrs.OutputValue - Sensitive bool - Expr hcl.Expression - // ContinueOnErr allows interpolation to fail during Input - ContinueOnErr bool -} - -// TODO: test -func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - addr := n.Addr.Absolute(ctx.Path()) - - // This has to run before we have a state lock, since evaluation also - // reads the state - val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) - // We'll handle errors below, after we have loaded the module. 
- - state := ctx.State() - if state == nil { - return nil, nil - } - - changes := ctx.Changes() // may be nil, if we're not working on a changeset - - // handling the interpolation error - if diags.HasErrors() { - if n.ContinueOnErr || flagWarnOutputErrors { - log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) - // if we're continuing, make sure the output is included, and - // marked as unknown. If the evaluator was able to find a type - // for the value in spite of the error then we'll use it. - n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) - return nil, EvalEarlyExitError{} - } - return nil, diags.Err() - } - - n.setValue(addr, state, changes, val) - - return nil, nil -} - -func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { - if val.IsKnown() && !val.IsNull() { - // The state itself doesn't represent unknown values, so we null them - // out here and then we'll save the real unknown value in the planned - // changeset below, if we have one on this graph walk. - log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) - stateVal := cty.UnknownAsNull(val) - state.SetOutputValue(addr, stateVal, n.Sensitive) - } else { - log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) - state.RemoveOutputValue(addr) - } - - // If we also have an active changeset then we'll replicate the value in - // there. This is used in preference to the state where present, since it - // *is* able to represent unknowns, while the state cannot. - if changes != nil { - // For the moment we are not properly tracking changes to output - // values, and just marking them always as "Create" or "Destroy" - // actions. A future release will rework the output lifecycle so we - // can track their changes properly, in a similar way to how we work - // with resource instances. - - var change *plans.OutputChange - if !val.IsNull() { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - Action: plans.Create, - Before: cty.NullVal(cty.DynamicPseudoType), - After: val, - }, - } - } else { - change = &plans.OutputChange{ - Addr: addr, - Sensitive: n.Sensitive, - Change: plans.Change{ - // This is just a weird placeholder delete action since - // we don't have an actual prior value to indicate. - // FIXME: Generate real planned changes for output values - // that include the old values. 
- Action: plans.Delete, - Before: cty.NullVal(cty.DynamicPseudoType), - After: cty.NullVal(cty.DynamicPseudoType), - }, - } - } - - cs, err := change.Encode() - if err != nil { - // Should never happen, since we just constructed this right above - panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) - } - log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) - changes.RemoveOutputChange(addr) // remove any existing planned change, if present - changes.AppendOutputChange(cs) // add the new planned change - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go deleted file mode 100644 index 2ff3745d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go +++ /dev/null @@ -1,147 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/tfdiags" -) - -func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body { - var configBody hcl.Body - if config != nil { - configBody = config.Config - } - - var inputBody hcl.Body - inputConfig := ctx.ProviderInput(addr) - if len(inputConfig) > 0 { - inputBody = configs.SynthBody("", inputConfig) - } - - switch { - case configBody != nil && inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) - // Note that the inputBody is the _base_ here, because configs.MergeBodies - // expects the base have all of the required fields, while these are - // forced to be optional for the override. The input process should - // guarantee that we have a value for each of the required arguments and - // that in practice the sets of attributes in each body will be - // disjoint. - return configs.MergeBodies(inputBody, configBody) - case configBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) - return configBody - case inputBody != nil: - log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) - return inputBody - default: - log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) - return hcl.EmptyBody() - } -} - -// EvalConfigProvider is an EvalNode implementation that configures -// a provider that is already initialized and retrieved. 
-type EvalConfigProvider struct { - Addr addrs.ProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil { - return nil, fmt.Errorf("EvalConfigProvider Provider is nil") - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - config := n.Config - - configBody := buildProviderConfig(ctx, n.Addr, config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configDiags := ctx.ConfigureProvider(n.Addr, configVal) - configDiags = configDiags.InConfigBody(configBody) - - return nil, configDiags.ErrWithWarnings() -} - -// EvalInitProvider is an EvalNode implementation that initializes a provider -// and returns nothing. The provider can be retrieved again with the -// EvalGetProvider node. -type EvalInitProvider struct { - TypeName string - Addr addrs.ProviderConfig -} - -func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.TypeName, n.Addr) -} - -// EvalCloseProvider is an EvalNode implementation that closes provider -// connections that aren't needed anymore. -type EvalCloseProvider struct { - Addr addrs.ProviderConfig -} - -func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Addr) - return nil, nil -} - -// EvalGetProvider is an EvalNode implementation that retrieves an already -// initialized provider instance for the given name. -// -// Unlike most eval nodes, this takes an _absolute_ provider configuration, -// because providers can be passed into and inherited between modules. -// Resource nodes must therefore know the absolute path of the provider they -// will use, which is usually accomplished by implementing -// interface GraphNodeProviderConsumer. -type EvalGetProvider struct { - Addr addrs.AbsProviderConfig - Output *providers.Interface - - // If non-nil, Schema will be updated after eval to refer to the - // schema of the provider. - Schema **ProviderSchema -} - -func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - if n.Addr.ProviderConfig.Type.LegacyString() == "" { - // Should never happen - panic("EvalGetProvider used with uninitialized provider configuration address") - } - - result := ctx.Provider(n.Addr) - if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Addr) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProviderSchema(n.Addr) - } - - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go deleted file mode 100644 index bc6b5cc7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go +++ /dev/null @@ -1,55 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/provisioners" -) - -// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner -// and returns nothing. The provisioner can be retrieved again with the -// EvalGetProvisioner node. 
-type EvalInitProvisioner struct { - Name string -} - -func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvisioner(n.Name) -} - -// EvalCloseProvisioner is an EvalNode implementation that closes provisioner -// connections that aren't needed anymore. -type EvalCloseProvisioner struct { - Name string -} - -func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvisioner(n.Name) - return nil, nil -} - -// EvalGetProvisioner is an EvalNode implementation that retrieves an already -// initialized provisioner instance for the given name. -type EvalGetProvisioner struct { - Name string - Output *provisioners.Interface - Schema **configschema.Block -} - -func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provisioner(n.Name) - if result == nil { - return nil, fmt.Errorf("provisioner %s not initialized", n.Name) - } - - if n.Output != nil { - *n.Output = result - } - - if n.Schema != nil { - *n.Schema = ctx.ProvisionerSchema(n.Name) - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go deleted file mode 100644 index e58ec7c6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go +++ /dev/null @@ -1,390 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalReadData is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. -type EvalReadData struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - - // Planned is set when dealing with data resources that were deferred to - // the apply walk, to let us see what was planned. If this is set, the - // evaluation of the config is required to produce a wholly-known - // configuration which is consistent with the partial object included - // in this planned change. - Planned **plans.ResourceInstanceChange - - // ForcePlanRead, if true, overrides the usual behavior of immediately - // reading from the data source where possible, instead forcing us to - // _always_ generate a plan. This is used during the plan walk, since we - // mustn't actually apply anything there. (The resulting state doesn't - // get persisted) - ForcePlanRead bool - - // The result from this EvalNode has a few different possibilities - // depending on the input: - // - If Planned is nil then we assume we're aiming to _produce_ the plan, - // and so the following two outcomes are possible: - // - OutputChange.Action is plans.NoOp and OutputState is the complete - // result of reading from the data source. This is the easy path. - // - OutputChange.Action is plans.Read and OutputState is a planned - // object placeholder (states.ObjectPlanned). In this case, the - // returned change must be recorded in the overral changeset and - // eventually passed to another instance of this struct during the - // apply walk. 
- // - If Planned is non-nil then we assume we're aiming to complete a - // planned read from an earlier plan walk. In this case the only possible - // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp - // change and put the complete resulting state in OutputState, ready to - // be saved in the overall state and used for expression evaluation. - OutputChange **plans.ResourceInstanceChange - OutputValue *cty.Value - OutputConfigValue *cty.Value - OutputState **states.ResourceInstanceObject -} - -func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadData: working on %s", absAddr) - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema not available for %s", n.Addr) - } - - var diags tfdiags.Diagnostics - var change *plans.ResourceInstanceChange - var configVal cty.Value - - // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and - // EvalReadDataApply did, but it seems like we should handle that via a - // separate mechanism since it boils down to just deleting the object from - // the state... and we do that on every plan anyway, forcing the data - // resource to re-read. - - config := *n.Config - provider := *n.Provider - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type) - } - - // We'll always start by evaluating the configuration. What we do after - // that will depend on the evaluation result along with what other inputs - // we were given. - objTy := schema.ImpliedType() - priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time - - forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - var configDiags tfdiags.Diagnostics - configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) - - // If our configuration contains any unknown values then we must defer the - // read to the apply phase by producing a "Read" change for this resource, - // and a placeholder value for it in the state. - if n.ForcePlanRead || !configVal.IsWhollyKnown() { - // If the configuration is still unknown when we're applying a planned - // change then that indicates a bug in Terraform, since we should have - // everything resolved by now. 
- if n.Planned != nil && *n.Planned != nil { - return nil, fmt.Errorf( - "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", - absAddr, - ) - } - if n.ForcePlanRead { - log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) - } else { - log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr) - } - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - - change = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.Read, - Before: priorVal, - After: proposedNewVal, - }, - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - - if n.OutputChange != nil { - *n.OutputChange = change - } - if n.OutputValue != nil { - *n.OutputValue = change.After - } - if n.OutputConfigValue != nil { - *n.OutputConfigValue = configVal - } - if n.OutputState != nil { - state := &states.ResourceInstanceObject{ - Value: change.After, - Status: states.ObjectPlanned, // because the partial value in the plan must be used for now - } - *n.OutputState = state - } - - return nil, diags.ErrWithWarnings() - } - - if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read { - // If any other action gets in here then that's always a bug; this - // EvalNode only deals with reading. - return nil, fmt.Errorf( - "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", - (*n.Planned).Action, absAddr, - ) - } - - log.Printf("[TRACE] Re-validating config for %s", absAddr) - validateResp := provider.ValidateDataSourceConfig( - providers.ValidateDataSourceConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err() - } - - // If we get down here then our configuration is complete and we're read - // to actually call the provider to read the data. - log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) - - err := ctx.Hook(func(h Hook) (HookAction, error) { - // We don't have a state yet, so we'll just give the hook an - // empty one to work with. - return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) - }) - if err != nil { - return nil, err - } - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - newVal := resp.State - if newVal == cty.NilVal { - // This can happen with incompletely-configured mocks. We'll allow it - // and treat it as an alias for a properly-typed null value. 
- newVal = cty.NullVal(schema.ImpliedType()) - } - - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced null object", - fmt.Sprintf( - "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, - ), - )) - } - if !newVal.IsWhollyKnown() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, - ), - )) - - // We'll still save the object, but we need to eliminate any unknown - // values first because we can't serialize them in the state file. - // Note that this may cause set elements to be coalesced if they - // differed only by having unknown values, but we don't worry about - // that here because we're saving the value only for inspection - // purposes; the error we added above will halt the graph walk. - newVal = cty.UnknownAsNull(newVal) - } - - // Since we've completed the read, we actually have no change to make, but - // we'll produce a NoOp one anyway to preserve the usual flow of the - // plan phase and allow it to produce a complete plan. - change = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.NoOp, - Before: newVal, - After: newVal, - }, - } - state := &states.ResourceInstanceObject{ - Value: change.After, - Status: states.ObjectReady, // because we completed the read from the provider - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) - }) - if err != nil { - return nil, err - } - - if n.OutputChange != nil { - *n.OutputChange = change - } - if n.OutputValue != nil { - *n.OutputValue = change.After - } - if n.OutputConfigValue != nil { - *n.OutputConfigValue = configVal - } - if n.OutputState != nil { - *n.OutputState = state - } - - return nil, diags.ErrWithWarnings() -} - -// EvalReadDataApply is an EvalNode implementation that executes a data -// resource's ReadDataApply method to read data from the data source. -type EvalReadDataApply struct { - Addr addrs.ResourceInstance - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - Config *configs.Resource - Change **plans.ResourceInstanceChange -} - -func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider - change := *n.Change - providerSchema := *n.ProviderSchema - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If the diff is for *destroying* this resource then we'll - // just drop its state and move on, since data resources don't - // support an actual "destroy" action. 
- if change != nil && change.Action == plans.Delete { - if n.Output != nil { - *n.Output = nil - } - return nil, nil - } - - // For the purpose of external hooks we present a data apply as a - // "Refresh" rather than an "Apply" because creating a data source - // is presented to users/callers as a "read" operation. - err := ctx.Hook(func(h Hook) (HookAction, error) { - // We don't have a state yet, so we'll just give the hook an - // empty one to work with. - return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) - }) - if err != nil { - return nil, err - } - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: change.After, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) - } - - newVal := resp.State - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s. The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go deleted file mode 100644 index fd50f873..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go +++ /dev/null @@ -1,107 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalRefresh is an EvalNode implementation that does a refresh for -// a resource. 
-type EvalRefresh struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - Output **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) - return nil, diags.ErrWithWarnings() - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - // Call pre-refresh hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, state.Value) - }) - if err != nil { - return nil, diags.ErrWithWarnings() - } - - // Refresh! - priorVal := state.Value - req := providers.ReadResourceRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: priorVal, - Private: state.Private, - } - - provider := *n.Provider - resp := provider.ReadResource(req) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - if resp.NewState == cty.NilVal { - // This ought not to happen in real cases since it's not possible to - // send NilVal over the plugin RPC channel, but it can come up in - // tests due to sloppy mocking. - panic("new state is cty.NilVal") - } - - for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - newState := state.DeepCopy() - newState.Value = resp.NewState - newState.Private = resp.Private - newState.Dependencies = state.Dependencies - - // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) - }) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = newState - } - - return nil, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go deleted file mode 100644 index 3485e4f1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go +++ /dev/null @@ -1,42 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalSequence is an EvalNode that evaluates in sequence. -type EvalSequence struct { - Nodes []EvalNode -} - -func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - for _, n := range n.Nodes { - if n == nil { - continue - } - - if _, err := EvalRaw(n, ctx); err != nil { - if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { - // In this path we abort early, losing any non-error - // diagnostics we saw earlier. 
- return nil, err - } - diags = diags.Append(err) - if diags.HasErrors() { - // Halt if we get some errors, but warnings are okay. - break - } - } - } - - return nil, diags.ErrWithWarnings() -} - -// EvalNodeFilterable impl. -func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { - for i, node := range n.Nodes { - n.Nodes[i] = fn(node) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go deleted file mode 100644 index fe5abb24..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ /dev/null @@ -1,566 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalReadState is an EvalNode implementation that reads the -// current object for a specific instance in the state. -type EvalReadState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadState used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadState used with no ProviderSchema object") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) - - src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) - if src == nil { - // Presumably we only have deposed objects, then. - log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalReadStateDeposed is an EvalNode implementation that reads the -// deposed InstanceState for a specific resource out of the state -type EvalReadStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key identifies which deposed object we will read. 
- Key states.DeposedKey - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadStateDeposed used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadStateDeposed used with no ProviderSchema object") - } - - key := n.Key - if key == states.NotDeposed { - return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") - } - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - - src := ctx.State().ResourceInstanceObject(absAddr, key) - if src == nil { - // Presumably we only have deposed objects, then. - log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalRequireState is an EvalNode implementation that exits early if the given -// object is null. -type EvalRequireState struct { - State **states.ResourceInstanceObject -} - -func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - return nil, EvalEarlyExitError{} - } - - state := *n.State - if state == nil || state.Value.IsNull() { - return nil, EvalEarlyExitError{} - } - - return nil, nil -} - -// EvalUpdateStateHook is an EvalNode implementation that calls the -// PostStateUpdate hook with the current state. -type EvalUpdateStateHook struct{} - -func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { - // In principle we could grab the lock here just long enough to take a - // deep copy and then pass that to our hooks below, but we'll instead - // hold the hook for the duration to avoid the potential confusing - // situation of us racing to call PostStateUpdate concurrently with - // different state snapshots. 
- stateSync := ctx.State() - state := stateSync.Lock().DeepCopy() - defer stateSync.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - if err != nil { - return nil, err - } - - return nil, nil -} - -// EvalWriteState is an EvalNode implementation that saves the given object -// as the current object for the selected resource instance. -type EvalWriteState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig - - // Dependencies are the inter-resource dependencies to be stored in the - // state. - Dependencies *[]addrs.AbsResource -} - -func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteState used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - if n.ProviderAddr.ProviderConfig.Type.LegacyString() == "" { - return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr) - } - obj := *n.State - if obj == nil || obj.Value.IsNull() { - // No need to encode anything: we'll just write it directly. - state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) - return nil, nil - } - - // store the new deps in the state - if n.Dependencies != nil { - log.Printf("[TRACE] EvalWriteState: recording %d dependencies for %s", len(*n.Dependencies), absAddr) - obj.Dependencies = *n.Dependencies - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteState used with pointer to nil ProviderSchema object") - } - - if obj != nil { - log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) - } else { - log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. - return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) - return nil, nil -} - -// EvalWriteStateDeposed is an EvalNode implementation that writes -// an InstanceState out to the Deposed list of a resource in the state. -type EvalWriteStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key indicates which deposed object to write to. - Key states.DeposedKey - - // State is the object state to save. 
- State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig -} - -func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteStateDeposed used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - key := n.Key - state := ctx.State() - - if key == states.NotDeposed { - // should never happen - return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) - } - - obj := *n.State - if obj == nil { - // No need to encode anything: we'll just write it directly. - state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) - return nil, nil - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteStateDeposed used with no ProviderSchema object") - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. - return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key) - state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr) - return nil, nil -} - -// EvalDeposeState is an EvalNode implementation that moves the current object -// for the given instance to instead be a deposed object, leaving the instance -// with no current object. -// This is used at the beginning of a create-before-destroy replace action so -// that the create can create while preserving the old state of the -// to-be-destroyed object. -type EvalDeposeState struct { - Addr addrs.ResourceInstance - - // ForceKey, if a value other than states.NotDeposed, will be used as the - // key for the newly-created deposed object that results from this action. - // If set to states.NotDeposed (the zero value), a new unique key will be - // allocated. - ForceKey states.DeposedKey - - // OutputKey, if non-nil, will be written with the deposed object key that - // was generated for the object. This can then be passed to - // EvalUndeposeState.Key so it knows which deposed instance to forget. 
-    OutputKey *states.DeposedKey
-}
-
-// TODO: test
-func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
-    absAddr := n.Addr.Absolute(ctx.Path())
-    state := ctx.State()
-
-    var key states.DeposedKey
-    if n.ForceKey == states.NotDeposed {
-        key = state.DeposeResourceInstanceObject(absAddr)
-    } else {
-        key = n.ForceKey
-        state.DeposeResourceInstanceObjectForceKey(absAddr, key)
-    }
-    log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key)
-
-    if n.OutputKey != nil {
-        *n.OutputKey = key
-    }
-
-    return nil, nil
-}
-
-// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will
-// restore a particular deposed object of the specified resource instance
-// to be the "current" object if and only if the instance doesn't currently
-// have a current object.
-//
-// This is intended for use when the create leg of a create before destroy
-// fails with no partial new object: if we didn't take any action, the user
-// would be left in the unfortunate situation of having no current object
-// and the previously-working object now deposed. This EvalNode causes a
-// better outcome by restoring things to how they were before the replace
-// operation began.
-//
-// The create operation may have produced a partial result even though it
-// failed and it's important that we don't "forget" that state, so in that
-// situation the prior object remains deposed and the partial new object
-// remains the current object, allowing the situation to hopefully be
-// improved in a subsequent run.
-type EvalMaybeRestoreDeposedObject struct {
-    Addr addrs.ResourceInstance
-
-    // PlannedChange might be the action we're performing that includes
-    // the possibility of restoring a deposed object. However, it might also
-    // be nil. It's here only for use in error messages and must not be
-    // used for business logic.
-    PlannedChange **plans.ResourceInstanceChange
-
-    // Key is a pointer to the deposed object key that should be forgotten
-    // from the state, which must be non-nil.
-    Key *states.DeposedKey
-}
-
-// TODO: test
-func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) {
-    absAddr := n.Addr.Absolute(ctx.Path())
-    dk := *n.Key
-    state := ctx.State()
-
-    if dk == states.NotDeposed {
-        // This should never happen, and so it always indicates a bug.
-        // We should evaluate this node only if we've previously deposed
-        // an object as part of the same operation.
-        var diags tfdiags.Diagnostics
-        if n.PlannedChange != nil && *n.PlannedChange != nil {
-            diags = diags.Append(tfdiags.Sourceless(
-                tfdiags.Error,
-                "Attempt to restore non-existent deposed object",
-                fmt.Sprintf(
-                    "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This occurred during a %s action. This is a bug in Terraform; please report it!",
-                    absAddr, (*n.PlannedChange).Action,
-                ),
-            ))
-        } else {
-            diags = diags.Append(tfdiags.Sourceless(
-                tfdiags.Error,
-                "Attempt to restore non-existent deposed object",
-                fmt.Sprintf(
-                    "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object.
This is a bug in Terraform; please report it!", - absAddr, - ), - )) - } - return nil, diags.Err() - } - - restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk) - if restored { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk) - } else { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk) - } - - return nil, nil -} - -// EvalWriteResourceState is an EvalNode implementation that ensures that -// a suitable resource-level state record is present in the state, if that's -// required for the "each mode" of that resource. -// -// This is important primarily for the situation where count = 0, since this -// eval is the only change we get to set the resource "each mode" to list -// in that case, allowing expression evaluation to see it as a zero-element -// list rather than as not set at all. -type EvalWriteResourceState struct { - Addr addrs.Resource - Config *configs.Resource - ProviderAddr addrs.AbsProviderConfig -} - -// TODO: test -func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - eachMode := states.NoEach - if count >= 0 { // -1 signals "count not set" - eachMode = states.EachList - } - - forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - if forEach != nil { - eachMode = states.EachMap - } - - // This method takes care of all of the business logic of updating this - // while ensuring that any existing instances are preserved, etc. - state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) - - return nil, nil -} - -// EvalForgetResourceState is an EvalNode implementation that prunes out an -// empty resource-level state for a given resource address, or produces an -// error if it isn't empty after all. -// -// This should be the last action taken for a resource that has been removed -// from the configuration altogether, to clean up the leftover husk of the -// resource in the state after other EvalNodes have destroyed and removed -// all of the instances and instance objects beneath it. -type EvalForgetResourceState struct { - Addr addrs.Resource -} - -func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - pruned := state.RemoveResourceIfEmpty(absAddr) - if !pruned { - // If this produces an error, it indicates a bug elsewhere in Terraform - // -- probably missing graph nodes, graph edges, or - // incorrectly-implemented evaluation steps. - return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) - } - log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) - - return nil, nil -} - -// EvalRefreshDependencies is an EvalNode implementation that appends any newly -// found dependencies to those saved in the state. The existing dependencies -// are retained, as they may be missing from the config, and will be required -// for the updates and destroys during the next apply. 
-type EvalRefreshDependencies struct {
-    // Prior State
-    State **states.ResourceInstanceObject
-    // Dependencies to write to the new state
-    Dependencies *[]addrs.AbsResource
-}
-
-func (n *EvalRefreshDependencies) Eval(ctx EvalContext) (interface{}, error) {
-    state := *n.State
-    if state == nil {
-        // no existing state to append
-        return nil, nil
-    }
-
-    depMap := make(map[string]addrs.AbsResource)
-    for _, d := range *n.Dependencies {
-        depMap[d.String()] = d
-    }
-
-    // We already have dependencies in state, so we need to trust those for
-    // the refresh. We can't write out new dependencies until apply time in
-    // case the configuration has been changed in a manner that conflicts with
-    // the stored dependencies.
-    if len(state.Dependencies) > 0 {
-        *n.Dependencies = state.Dependencies
-        return nil, nil
-    }
-
-    deps := make([]addrs.AbsResource, 0, len(depMap))
-    for _, d := range depMap {
-        deps = append(deps, d)
-    }
-
-    sort.Slice(deps, func(i, j int) bool {
-        return deps[i].String() < deps[j].String()
-    })
-
-    *n.Dependencies = deps
-
-    return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go
deleted file mode 100644
index e1940005..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package terraform
-
-import (
-    "fmt"
-    "log"
-
-    "github.com/hashicorp/terraform/addrs"
-    "github.com/hashicorp/terraform/configs/configschema"
-    "github.com/hashicorp/terraform/providers"
-    "github.com/hashicorp/terraform/states"
-    "github.com/hashicorp/terraform/tfdiags"
-)
-
-// UpgradeResourceState will, if necessary, run the provider-defined upgrade
-// logic against the given state object to make it compliant with the
-// current schema version. This is a no-op if the given state object is
-// already at the latest version.
-//
-// If any errors occur during upgrade, error diagnostics are returned. In that
-// case it is not safe to proceed with using the original state object.
-func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) {
-    if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
-        // We only do state upgrading for managed resources.
-        return src, nil
-    }
-
-    stateIsFlatmap := len(src.AttrsJSON) == 0
-
-    providerType := addr.Resource.Resource.DefaultProviderConfig().Type
-    if src.SchemaVersion > currentVersion {
-        log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion)
-        var diags tfdiags.Diagnostics
-        diags = diags.Append(tfdiags.Sourceless(
-            tfdiags.Error,
-            "Resource instance managed by newer provider version",
-            // This is not a very good error message, but we don't retain enough
-            // information in state to give good feedback on what provider
-            // version might be required here. :(
-            fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType),
-        ))
-        return nil, diags
-    }
-
-    // If we get down here then we need to upgrade the state, with the
-    // provider's help.
- // If this state was originally created by a version of Terraform prior to - // v0.12, this also includes translating from legacy flatmap to new-style - // representation, since only the provider has enough information to - // understand a flatmap built against an older schema. - if src.SchemaVersion != currentVersion { - log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) - } else { - log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) - } - - req := providers.UpgradeResourceStateRequest{ - TypeName: addr.Resource.Resource.Type, - - // TODO: The internal schema version representations are all using - // uint64 instead of int64, but unsigned integers aren't friendly - // to all protobuf target languages so in practice we use int64 - // on the wire. In future we will change all of our internal - // representations to int64 too. - Version: int64(src.SchemaVersion), - } - - if stateIsFlatmap { - req.RawStateFlatmap = src.AttrsFlat - } else { - req.RawStateJSON = src.AttrsJSON - } - - resp := provider.UpgradeResourceState(req) - diags := resp.Diagnostics - if diags.HasErrors() { - return nil, diags - } - - // After upgrading, the new value must conform to the current schema. When - // going over RPC this is actually already ensured by the - // marshaling/unmarshaling of the new value, but we'll check it here - // anyway for robustness, e.g. for in-process providers. - newValue := resp.UpgradedState - if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid resource state upgrade", - fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), - )) - } - return nil, diags - } - - new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) - if err != nil { - // We already checked for type conformance above, so getting into this - // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Failed to encode result of resource state upgrade", - fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), - )) - } - return new, diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go deleted file mode 100644 index 5b2146a5..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ /dev/null @@ -1,588 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EvalValidateCount is an EvalNode implementation that validates -// the count of a resource. 
-type EvalValidateCount struct { - Resource *configs.Resource -} - -// TODO: test -func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - var count int - var err error - - val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - goto RETURN - } - if val.IsNull() || !val.IsKnown() { - goto RETURN - } - - err = gocty.FromCtyValue(val, &count) - if err != nil { - // The EvaluateExpr call above already guaranteed us a number value, - // so if we end up here then we have something that is out of range - // for an int, and the error message will include a description of - // the valid range. - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), - Subject: n.Resource.Count.Range().Ptr(), - }) - } else if count < 0 { - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), - Subject: n.Resource.Count.Range().Ptr(), - }) - } - -RETURN: - return nil, diags.NonFatalErr() -} - -// EvalValidateProvider is an EvalNode implementation that validates -// a provider configuration. -type EvalValidateProvider struct { - Addr addrs.ProviderConfig - Provider *providers.Interface - Config *configs.Provider -} - -func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - provider := *n.Provider - - configBody := buildProviderConfig(ctx, n.Addr, n.Config) - - resp := provider.GetSchema() - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - - configSchema := resp.Provider.Block - if configSchema == nil { - // Should never happen in real code, but often comes up in tests where - // mock schemas are being used that tend to be incomplete. - log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) - configSchema = &configschema.Block{} - } - - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) - if evalDiags.HasErrors() { - return nil, diags.NonFatalErr() - } - - req := providers.PrepareProviderConfigRequest{ - Config: configVal, - } - - validateResp := provider.PrepareProviderConfig(req) - diags = diags.Append(validateResp.Diagnostics) - - return nil, diags.NonFatalErr() -} - -// EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a provisioner belonging to a resource. The provisioner -// config is expected to contain the merged connection configurations. 
-type EvalValidateProvisioner struct { - ResourceAddr addrs.Resource - Provisioner *provisioners.Interface - Schema **configschema.Block - Config *configs.Provisioner - ResourceHasCount bool - ResourceHasForEach bool -} - -func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { - provisioner := *n.Provisioner - config := *n.Config - schema := *n.Schema - - var diags tfdiags.Diagnostics - - { - // Validate the provisioner's own config first - - configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - - if configVal == cty.NilVal { - // Should never happen for a well-behaved EvaluateBlock implementation - return nil, fmt.Errorf("EvaluateBlock returned nil value") - } - - req := provisioners.ValidateProvisionerConfigRequest{ - Config: configVal, - } - - resp := provisioner.ValidateProvisionerConfig(req) - diags = diags.Append(resp.Diagnostics) - } - - { - // Now validate the connection config, which contains the merged bodies - // of the resource and provisioner connection blocks. - connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) - diags = diags.Append(connDiags) - } - - return nil, diags.NonFatalErr() -} - -func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - - var diags tfdiags.Diagnostics - - if config == nil || config.Config == nil { - // No block to validate - return diags - } - - // We evaluate here just by evaluating the block and returning any - // diagnostics we get, since evaluation alone is enough to check for - // extraneous arguments and incorrectly-typed arguments. - _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) - diags = diags.Append(configDiags) - - return diags -} - -func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - keyData := EvalDataForNoInstanceKey - selfAddr := n.ResourceAddr.Instance(addrs.NoKey) - - if n.ResourceHasCount { - // For a resource that has count, we allow count.index but don't - // know at this stage what it will return. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // "self" can't point to an unknown key, but we'll force it to be - // key 0 here, which should return an unknown value of the - // expected type since none of these elements are known at this - // point anyway. - selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) - } else if n.ResourceHasForEach { - // For a resource that has for_each, we allow each.value and each.key - // but don't know at this stage what it will return. - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - // "self" can't point to an unknown key, but we'll force it to be - // key "" here, which should return an unknown value of the - // expected type since none of these elements are known at - // this point anyway. 
- selfAddr = n.ResourceAddr.Instance(addrs.StringKey("")) - } - - return ctx.EvaluateBlock(body, schema, selfAddr, keyData) -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. Once that is done, we can remove -// this and use a type-specific schema from the communicator to validate -// exactly what is expected for a given connection type. -var connectionBlockSupersetSchema = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - // NOTE: "type" is not included here because it's treated special - // by the config loader and stored away in a separate field. - - // Common attributes for both connection types - "host": { - Type: cty.String, - Required: true, - }, - "type": { - Type: cty.String, - Optional: true, - }, - "user": { - Type: cty.String, - Optional: true, - }, - "password": { - Type: cty.String, - Optional: true, - }, - "port": { - Type: cty.String, - Optional: true, - }, - "timeout": { - Type: cty.String, - Optional: true, - }, - "script_path": { - Type: cty.String, - Optional: true, - }, - - // For type=ssh only (enforced in ssh communicator) - "private_key": { - Type: cty.String, - Optional: true, - }, - "certificate": { - Type: cty.String, - Optional: true, - }, - "host_key": { - Type: cty.String, - Optional: true, - }, - "agent": { - Type: cty.Bool, - Optional: true, - }, - "agent_identity": { - Type: cty.String, - Optional: true, - }, - "bastion_host": { - Type: cty.String, - Optional: true, - }, - "bastion_host_key": { - Type: cty.String, - Optional: true, - }, - "bastion_port": { - Type: cty.Number, - Optional: true, - }, - "bastion_user": { - Type: cty.String, - Optional: true, - }, - "bastion_password": { - Type: cty.String, - Optional: true, - }, - "bastion_private_key": { - Type: cty.String, - Optional: true, - }, - "bastion_certificate": { - Type: cty.String, - Optional: true, - }, - - // For type=winrm only (enforced in winrm communicator) - "https": { - Type: cty.Bool, - Optional: true, - }, - "insecure": { - Type: cty.Bool, - Optional: true, - }, - "cacert": { - Type: cty.String, - Optional: true, - }, - "use_ntlm": { - Type: cty.Bool, - Optional: true, - }, - }, -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. It's exported only for use in the -// configs/configupgrade package and should not be used from anywhere else. -// The caller may not modify any part of the returned schema data structure. -func ConnectionBlockSupersetSchema() *configschema.Block { - return connectionBlockSupersetSchema -} - -// EvalValidateResource is an EvalNode implementation that validates -// the configuration of a resource. -type EvalValidateResource struct { - Addr addrs.Resource - Provider *providers.Interface - ProviderSchema **ProviderSchema - Config *configs.Resource - - // IgnoreWarnings means that warnings will not be passed through. This allows - // "just-in-time" passes of validation to continue execution through warnings. - IgnoreWarnings bool - - // ConfigVal, if non-nil, will be updated with the value resulting from - // evaluating the given configuration body. 
Since validation is performed - // very early, this value is likely to contain lots of unknown values, - // but its type will conform to the schema of the resource type associated - // with the resource instance being validated. - ConfigVal *cty.Value -} - -func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - cfg := *n.Config - schema := *n.ProviderSchema - mode := cfg.Mode - - keyData := EvalDataForNoInstanceKey - if n.Config.Count != nil { - // If the config block has count, we'll evaluate with an unknown - // number as count.index so we can still type check even though - // we won't expand count until the plan phase. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // Basic type-checking of the count argument. More complete validation - // of this will happen when we DynamicExpand during the plan walk. - countDiags := n.validateCount(ctx, n.Config.Count) - diags = diags.Append(countDiags) - } - - if n.Config.ForEach != nil { - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.UnknownVal(cty.DynamicPseudoType), - } - - // Evaluate the for_each expression here so we can expose the diagnostics - forEachDiags := n.validateForEach(ctx, n.Config.ForEach) - diags = diags.Append(forEachDiags) - } - - for _, traversal := range n.Config.DependsOn { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if !refDiags.HasErrors() && len(ref.Remaining) != 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid depends_on reference", - Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", - Subject: ref.Remaining.SourceRange().Ptr(), - }) - } - - // The ref must also refer to something that exists. To test that, - // we'll just eval it and count on the fact that our evaluator will - // detect references to non-existent objects. - if !diags.HasErrors() { - scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) - if scope != nil { // sometimes nil in tests, due to incomplete mocks - _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) - diags = diags.Append(refDiags) - } - } - } - - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. 
- switch mode { - case addrs.ManagedResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type", - Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks - for _, traversal := range cfg.Managed.IgnoreChanges { - moreDiags := schema.StaticValidateTraversal(traversal) - diags = diags.Append(moreDiags) - } - } - - req := providers.ValidateResourceTypeConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateResourceTypeConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - - if n.ConfigVal != nil { - *n.ConfigVal = configVal - } - - case addrs.DataResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source", - Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return nil, diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return nil, diags.Err() - } - - req := providers.ValidateDataSourceConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateDataSourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - } - - if n.IgnoreWarnings { - // If we _only_ have warnings then we'll return nil. - if diags.HasErrors() { - return nil, diags.NonFatalErr() - } - return nil, nil - } else { - // We'll return an error if there are any diagnostics at all, even if - // some of them are warnings. - return nil, diags.NonFatalErr() - } -} - -func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { - if expr == nil { - return nil - } - - var diags tfdiags.Diagnostics - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return diags - } - - if countVal.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - var err error - countVal, err = convert.Convert(countVal, cty.Number) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk. - if !countVal.IsKnown() { - return diags - } - - // If we _do_ know the value, then we can do a few more checks here. - var count int - err = gocty.FromCtyValue(countVal, &count) - if err != nil { - // Isn't a whole number, etc. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: count cannot be negative.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - return diags -} - -func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { - _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx) - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk - if !known { - return diags - } - - if forEachDiags.HasErrors() { - diags = diags.Append(forEachDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go deleted file mode 100644 index dd5e4018..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go +++ /dev/null @@ -1,67 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that -// expressions within a particular referencable block do not reference that -// same block. -type EvalValidateSelfRef struct { - Addr addrs.Referenceable - Config hcl.Body - ProviderSchema **ProviderSchema -} - -func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - addr := n.Addr - - addrStrs := make([]string, 0, 1) - addrStrs = append(addrStrs, addr.String()) - switch tAddr := addr.(type) { - case addrs.ResourceInstance: - // A resource instance may not refer to its containing resource either. 
- addrStrs = append(addrStrs, tAddr.ContainingResource().String()) - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) - } - - providerSchema := *n.ProviderSchema - var schema *configschema.Block - switch tAddr := addr.(type) { - case addrs.Resource: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr) - case addrs.ResourceInstance: - schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) - } - - if schema == nil { - return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) - } - - refs, _ := lang.ReferencesInBlock(n.Config, schema) - for _, ref := range refs { - for _, addrStr := range addrStrs { - if ref.Subject.String() == addrStr { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Self-referential block", - Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return nil, diags.NonFatalErr() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go deleted file mode 100644 index e8a88a14..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go +++ /dev/null @@ -1,232 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "reflect" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// EvalSetModuleCallArguments is an EvalNode implementation that sets values -// for arguments of a child module call, for later retrieval during -// expression evaluation. -type EvalSetModuleCallArguments struct { - Module addrs.ModuleCallInstance - Values map[string]cty.Value -} - -// TODO: test -func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetModuleCallArguments(n.Module, n.Values) - return nil, nil -} - -// EvalModuleCallArgument is an EvalNode implementation that produces the value -// for a particular variable as will be used by a child module instance. -// -// The result is written into the map given in Values, with its key -// set to the local name of the variable, disregarding the module instance -// address. Any existing values in that map are deleted first. This weird -// interface is a result of trying to be convenient for use with -// EvalContext.SetModuleCallArguments, which expects a map to merge in with -// any existing arguments. -type EvalModuleCallArgument struct { - Addr addrs.InputVariable - Config *configs.Variable - Expr hcl.Expression - - // If this flag is set, any diagnostics are discarded and this operation - // will always succeed, though may produce an unknown value in the - // event of an error. - IgnoreDiagnostics bool - - Values map[string]cty.Value -} - -func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { - // Clear out the existing mapping - for k := range n.Values { - delete(n.Values, k) - } - - wantType := n.Config.Type - name := n.Addr.Name - expr := n.Expr - - if expr == nil { - // Should never happen, but we'll bail out early here rather than - // crash in case it does. 
We set no value at all in this case, - // making a subsequent call to EvalContext.SetModuleCallArguments - // a no-op. - log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) - return nil, nil - } - - val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) - - // We intentionally passed DynamicPseudoType to EvaluateExpr above because - // now we can do our own local type conversion and produce an error message - // with better context if it fails. - var convErr error - val, convErr = convert.Convert(val, wantType) - if convErr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for module argument", - Detail: fmt.Sprintf( - "The given value is not suitable for child module variable %q defined at %s: %s.", - name, n.Config.DeclRange.String(), convErr, - ), - Subject: expr.Range().Ptr(), - }) - // We'll return a placeholder unknown value to avoid producing - // redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - n.Values[name] = val - if n.IgnoreDiagnostics { - return nil, nil - } - return nil, diags.ErrWithWarnings() -} - -// evalVariableValidations is an EvalNode implementation that ensures that -// all of the configured custom validations for a variable are passing. -// -// This must be used only after any side-effects that make the value of the -// variable available for use in expression evaluation, such as -// EvalModuleCallArgument for variables in descendent modules. -type evalVariableValidations struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable - - // Expr is the expression that provided the value for the variable, if any. - // This will be nil for root module variables, because their values come - // from outside the configuration. - Expr hcl.Expression - - // If this flag is set, this node becomes a no-op. - // This is here for consistency with EvalModuleCallArgument so that it - // can be populated with the same value, where needed. - IgnoreDiagnostics bool -} - -func (n *evalVariableValidations) Eval(ctx EvalContext) (interface{}, error) { - if n.Config == nil || n.IgnoreDiagnostics || len(n.Config.Validations) == 0 { - log.Printf("[TRACE] evalVariableValidations: not active for %s, so skipping", n.Addr) - return nil, nil - } - - var diags tfdiags.Diagnostics - - // Variable nodes evaluate in the parent module to where they were declared - // because the value expression (n.Expr, if set) comes from the calling - // "module" block in the parent module. - // - // Validation expressions are statically validated (during configuration - // loading) to refer only to the variable being validated, so we can - // bypass our usual evaluation machinery here and just produce a minimal - // evaluation context containing just the required value, and thus avoid - // the problem that ctx's evaluation functions refer to the wrong module. 
- val := ctx.GetVariableValue(n.Addr) - hclCtx := &hcl.EvalContext{ - Variables: map[string]cty.Value{ - "var": cty.ObjectVal(map[string]cty.Value{ - n.Config.Name: val, - }), - }, - Functions: ctx.EvaluationScope(nil, EvalDataForNoInstanceKey).Functions(), - } - - for _, validation := range n.Config.Validations { - const errInvalidCondition = "Invalid variable validation result" - const errInvalidValue = "Invalid value for variable" - - result, moreDiags := validation.Condition.Value(hclCtx) - diags = diags.Append(moreDiags) - if moreDiags.HasErrors() { - log.Printf("[TRACE] evalVariableValidations: %s rule %s condition expression failed: %s", n.Addr, validation.DeclRange, diags.Err().Error()) - } - if !result.IsKnown() { - log.Printf("[TRACE] evalVariableValidations: %s rule %s condition value is unknown, so skipping validation for now", n.Addr, validation.DeclRange) - continue // We'll wait until we've learned more, then. - } - if result.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: "Validation condition expression must return either true or false, not null.", - Subject: validation.Condition.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - continue - } - var err error - result, err = convert.Convert(result, cty.Bool) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidCondition, - Detail: fmt.Sprintf("Invalid validation condition result value: %s.", tfdiags.FormatError(err)), - Subject: validation.Condition.Range().Ptr(), - Expression: validation.Condition, - EvalContext: hclCtx, - }) - continue - } - - if result.False() { - if n.Expr != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidValue, - Detail: fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", validation.ErrorMessage, validation.DeclRange.String()), - Subject: n.Expr.Range().Ptr(), - }) - } else { - // Since we don't have a source expression for a root module - // variable, we'll just report the error from the perspective - // of the variable declaration itself. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errInvalidValue, - Detail: fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", validation.ErrorMessage, validation.DeclRange.String()), - Subject: n.Config.DeclRange.Ptr(), - }) - } - } - } - - return nil, diags.ErrWithWarnings() -} - -// hclTypeName returns the name of the type that would represent this value in -// a config file, or falls back to the Go type name if there's no corresponding -// HCL type. This is used for formatted output, not for comparing types. 
-func hclTypeName(i interface{}) string { - switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k { - case reflect.Bool: - return "boolean" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: - return "number" - case reflect.Array, reflect.Slice: - return "list" - case reflect.Map: - return "map" - case reflect.String: - return "string" - default: - // fall back to the Go type if there's no match - return k.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go deleted file mode 100644 index f163a727..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/providers" -) - -// ProviderEvalTree returns the evaluation tree for initializing and -// configuring providers. -func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { - var provider providers.Interface - - addr := n.Addr - relAddr := addr.ProviderConfig - - seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{ - TypeName: relAddr.Type.LegacyString(), - Addr: addr.ProviderConfig, - }) - - // Input stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - &EvalValidateProvider{ - Addr: relAddr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - // Apply stuff - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: addr, - Output: &provider, - }, - }, - }, - }) - - // We configure on everything but validate, since validate may - // not have access to all the variables. - seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalConfigProvider{ - Addr: relAddr, - Provider: &provider, - Config: config, - }, - }, - }, - }) - - return &EvalSequence{Nodes: seq} -} - -// CloseProviderEvalTree returns the evaluation tree for closing -// provider connections that aren't needed anymore. 
-func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { - return &EvalCloseProvider{Addr: addr.ProviderConfig} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go deleted file mode 100644 index 5798d103..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go +++ /dev/null @@ -1,883 +0,0 @@ -package terraform - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "sync" - - "github.com/agext/levenshtein" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// Evaluator provides the necessary contextual data for evaluating expressions -// for a particular walk operation. -type Evaluator struct { - // Operation defines what type of operation this evaluator is being used - // for. - Operation walkOperation - - // Meta is contextual metadata about the current operation. - Meta *ContextMeta - - // Config is the root node in the configuration tree. - Config *configs.Config - - // VariableValues is a map from variable names to their associated values, - // within the module indicated by ModulePath. VariableValues is modified - // concurrently, and so it must be accessed only while holding - // VariableValuesLock. - // - // The first map level is string representations of addr.ModuleInstance - // values, while the second level is variable names. - VariableValues map[string]map[string]cty.Value - VariableValuesLock *sync.Mutex - - // Schemas is a repository of all of the schemas we should need to - // evaluate expressions. This must be constructed by the caller to - // include schemas for all of the providers, resource types, data sources - // and provisioners used by the given configuration and state. - // - // This must not be mutated during evaluation. - Schemas *Schemas - - // State is the current state, embedded in a wrapper that ensures that - // it can be safely accessed and modified concurrently. - State *states.SyncState - - // Changes is the set of proposed changes, embedded in a wrapper that - // ensures they can be safely accessed and modified concurrently. - Changes *plans.ChangesSync -} - -// Scope creates an evaluation scope for the given module path and optional -// resource. -// -// If the "self" argument is nil then the "self" object is not available -// in evaluated expressions. Otherwise, it behaves as an alias for the given -// address. -func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { - return &lang.Scope{ - Data: data, - SelfAddr: self, - PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, - BaseDir: ".", // Always current working directory for now. - } -} - -// evaluationStateData is an implementation of lang.Data that resolves -// references primarily (but not exclusively) using information from a State. -type evaluationStateData struct { - Evaluator *Evaluator - - // ModulePath is the path through the dynamic module tree to the module - // that references will be resolved relative to. 
- ModulePath addrs.ModuleInstance - - // InstanceKeyData describes the values, if any, that are accessible due - // to repetition of a containing object using "count" or "for_each" - // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, - // since the user specifies in that case which variable name to locally - // shadow.) - InstanceKeyData InstanceKeyEvalData - - // Operation records the type of walk the evaluationStateData is being used - // for. - Operation walkOperation -} - -// InstanceKeyEvalData is used during evaluation to specify which values, -// if any, should be produced for count.index, each.key, and each.value. -type InstanceKeyEvalData struct { - // CountIndex is the value for count.index, or cty.NilVal if evaluating - // in a context where the "count" argument is not active. - // - // For correct operation, this should always be of type cty.Number if not - // nil. - CountIndex cty.Value - - // EachKey and EachValue are the values for each.key and each.value - // respectively, or cty.NilVal if evaluating in a context where the - // "for_each" argument is not active. These must either both be set - // or neither set. - // - // For correct operation, EachKey must always be either of type cty.String - // or cty.Number if not nil. - EachKey, EachValue cty.Value -} - -// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for -// evaluating in a context that has the given instance key. -func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { - var evalData InstanceKeyEvalData - if key == nil { - return evalData - } - - keyValue := key.Value() - switch keyValue.Type() { - case cty.String: - evalData.EachKey = keyValue - evalData.EachValue = forEachMap[keyValue.AsString()] - case cty.Number: - evalData.CountIndex = keyValue - } - return evalData -} - -// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance -// key values at all, suitable for use in contexts where no keyed instance -// is relevant. -var EvalDataForNoInstanceKey = InstanceKeyEvalData{} - -// evaluationStateData must implement lang.Data -var _ lang.Data = (*evaluationStateData)(nil) - -func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "index": - idxVal := d.InstanceKeyData.CountIndex - if idxVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "count" in non-counted context`, - Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.Number), diags - } - return idxVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "count" attribute`, - Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. 
The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var returnVal cty.Value - switch addr.Name { - - case "key": - returnVal = d.InstanceKeyData.EachKey - case "value": - returnVal = d.InstanceKeyData.EachValue - - if returnVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `each.value cannot be used in this context`, - Detail: fmt.Sprintf(`A reference to "each.value" has been used in a context in which it unavailable, such as when the configuration no longer contains the value in its "for_each" expression. Remove this reference to each.value in your configuration to work around this error.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.DynamicPseudoType), diags - } - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "each" attribute`, - Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - if returnVal == cty.NilVal { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to "each" in context without for_each`, - Detail: fmt.Sprintf(`The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`), - Subject: rng.ToHCL().Ptr(), - }) - return cty.UnknownVal(cty.DynamicPseudoType), diags - } - return returnVal, diags -} - -func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Variables[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Variables { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } else { - suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared input variable`, - Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - wantType := cty.DynamicPseudoType - if config.Type != cty.NilType { - wantType = config.Type - } - - d.Evaluator.VariableValuesLock.Lock() - defer d.Evaluator.VariableValuesLock.Unlock() - - // During the validate walk, input variables are always unknown so - // that we are validating the configuration for all possible input values - // rather than for a specific set. Checking against a specific set of - // input values then happens during the plan walk. - // - // This is important because otherwise the validation walk will tend to be - // overly strict, requiring expressions throughout the configuration to - // be complicated to accommodate all possible inputs, whereas returning - // known here allows for simpler patterns like using input values as - // guards to broadly enable/disable resources, avoid processing things - // that are disabled, etc. Terraform's static validation leans towards - // being liberal in what it accepts because the subsequent plan walk has - // more information available and so can be more conservative. - if d.Operation == walkValidate { - return cty.UnknownVal(wantType), diags - } - - moduleAddrStr := d.ModulePath.String() - vals := d.Evaluator.VariableValues[moduleAddrStr] - if vals == nil { - return cty.UnknownVal(wantType), diags - } - - val, isSet := vals[addr.Name] - if !isSet { - if config.Default != cty.NilVal { - return config.Default, diags - } - return cty.UnknownVal(wantType), diags - } - - var err error - val, err = convert.Convert(val, wantType) - if err != nil { - // We should never get here because this problem should've been caught - // during earlier validation, but we'll do something reasonable anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Incorrect variable type`, - Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), - Subject: &config.DeclRange, - }) - // Stub out our return value so that the semantic checker doesn't - // produce redundant downstream errors. - val = cty.UnknownVal(wantType) - } - - return val, diags -} - -func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // First we'll make sure the requested value is declared in configuration, - // so we can produce a nice message if not. - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) - } - - config := moduleConfig.Module.Locals[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Locals { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared local value`, - Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) - if val == cty.NilVal { - // Not evaluated yet? - val = cty.DynamicVal - } - - return val, diags -} - -func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. - moduleAddr := addr.ModuleInstance(d.ModulePath) - - // We'll consult the configuration to see what output names we are - // expecting, so we can ensure the resulting object is of the expected - // type even if our data is incomplete for some reason. - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since this should've been caught during - // static validation. - panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) - } - outputConfigs := moduleConfig.Module.Outputs - - vals := map[string]cty.Value{} - for n := range outputConfigs { - addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) - - // If a pending change is present in our current changeset then its value - // takes priority over what's in state. (It will usually be the same but - // will differ if the new value is unknown during planning.) - if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) - vals[n] = cty.DynamicVal - continue - } - // We care only about the "after" value, which is the value this output - // will take on after the plan is applied. - vals[n] = change.After - } else { - os := d.Evaluator.State.OutputValue(addr) - if os == nil { - // Not evaluated yet? - vals[n] = cty.DynamicVal - continue - } - vals[n] = os.Value - } - } - return cty.ObjectVal(vals), diags -} - -func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // Output results live in the module that declares them, which is one of - // the child module instances of our current module path. - absAddr := addr.AbsOutputValue(d.ModulePath) - moduleAddr := absAddr.Module - - // First we'll consult the configuration to see if an output of this - // name is declared at all. 
- moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // this doesn't happen in normal circumstances due to our validation - // pass, but it can turn up in some unusual situations, like in the - // "terraform console" repl where arbitrary expressions can be - // evaluated. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - config := moduleConfig.Module.Outputs[addr.Name] - if config == nil { - var suggestions []string - for k := range moduleConfig.Module.Outputs { - suggestions = append(suggestions, k) - } - suggestion := nameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared output value`, - Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - // If a pending change is present in our current changeset then its value - // takes priority over what's in state. (It will usually be the same but - // will differ if the new value is unknown during planning.) - if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { - change, err := changeSrc.Decode() - if err != nil { - // This should happen only if someone has tampered with a plan - // file, so we won't bother with a pretty error for it. - diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) - return cty.DynamicVal, diags - } - // We care only about the "after" value, which is the value this output - // will take on after the plan is applied. - return change.After, diags - } - - os := d.Evaluator.State.OutputValue(absAddr) - if os == nil { - // Not evaluated yet? - return cty.DynamicVal, diags - } - - return os.Value, diags -} - -func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "cwd": - wd, err := os.Getwd() - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Failed to get working directory`, - Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - return cty.StringVal(filepath.ToSlash(wd)), diags - - case "module": - moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. 
- panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) - } - sourceDir := moduleConfig.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - case "root": - sourceDir := d.Evaluator.Config.Module.SourceDir - return cty.StringVal(filepath.ToSlash(sourceDir)), diags - - default: - suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "path" attribute`, - Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - // First we'll consult the configuration to see if an resource of this - // name is declared at all. - moduleAddr := d.ModulePath - moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) - if moduleConfig == nil { - // should never happen, since we can't be evaluating in a module - // that wasn't mentioned in configuration. - panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) - } - - config := moduleConfig.Module.ResourceByAddr(addr) - if config == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) - - if rs == nil { - // we must return DynamicVal so that both interpretations - // can proceed without generating errors, and we'll deal with this - // in a later step where more information is gathered. - // (In practice we should only end up here during the validate walk, - // since later walks should have at least partial states populated - // for all resources in the configuration.) - return cty.DynamicVal, diags - } - - // Break out early during validation, because resource may not be expanded - // yet and indexed references may show up as invalid. - if d.Operation == walkValidate { - return cty.DynamicVal, diags - } - - return d.getResourceInstancesAll(addr, rng, config, rs, rs.ProviderConfig) -} - -func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - instAddr := addrs.ResourceInstance{Resource: addr, Key: addrs.NoKey} - - schema := d.getResourceSchema(addr, providerAddr) - if schema == nil { - // This shouldn't happen, since validation before we get here should've - // taken care of it, but we'll show a reasonable error message anyway. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource type schema`, - Detail: fmt.Sprintf("No schema is available for %s in %s. 
This is a bug in Terraform and should be reported.", addr, providerAddr), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } - - switch rs.EachMode { - case states.NoEach: - ty := schema.ImpliedType() - is := rs.Instances[addrs.NoKey] - if is == nil || is.Current == nil { - // Assume we're dealing with an instance that hasn't been created yet. - return cty.UnknownVal(ty), diags - } - - if is.Current.Status == states.ObjectPlanned { - // If there's a pending change for this instance in our plan, we'll prefer - // that. This is important because the state can't represent unknown values - // and so its data is inaccurate when changes are pending. - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr.Absolute(d.ModulePath), states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - return val, diags - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), - Subject: &config.DeclRange, - }) - return cty.UnknownVal(ty), diags - } - - return ios.Value, diags - - case states.EachList: - // We need to infer the length of our resulting tuple by searching - // for the max IntKey in our instances map. - length := 0 - for k := range rs.Instances { - if ik, ok := k.(addrs.IntKey); ok { - if int(ik) >= length { - length = int(ik) + 1 - } - } - } - - vals := make([]cty.Value, length) - for i := 0; i < length; i++ { - ty := schema.ImpliedType() - key := addrs.IntKey(i) - is := rs.Instances[key] - if is == nil || is.Current == nil { - // There shouldn't normally be "gaps" in our list but we'll - // allow it under the assumption that we're in a weird situation - // where e.g. someone has run "terraform state mv" to reorder - // a list and left a hole behind. 
- vals[i] = cty.UnknownVal(schema.ImpliedType()) - continue - } - - instAddr := addr.Instance(key).Absolute(d.ModulePath) - - if is.Current.Status == states.ObjectPlanned { - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[i] = val - continue - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[i] = ios.Value - } - - // We use a tuple rather than a list here because resource schemas may - // include dynamically-typed attributes, which will then cause each - // instance to potentially have a different runtime type even though - // they all conform to the static schema. - return cty.TupleVal(vals), diags - - case states.EachMap: - ty := schema.ImpliedType() - vals := make(map[string]cty.Value, len(rs.Instances)) - for k, is := range rs.Instances { - if sk, ok := k.(addrs.StringKey); ok { - if is == nil || is.Current == nil { - // Assume we're dealing with an instance that hasn't been created yet. - vals[string(sk)] = cty.UnknownVal(schema.ImpliedType()) - continue - } - - instAddr := addr.Instance(k).Absolute(d.ModulePath) - - if is.Current.Status == states.ObjectPlanned { - if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { - val, err := change.After.Decode(ty) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in plan", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[string(sk)] = val - continue - } else { - // If the object is in planned status then we should not - // get here, since we should've found a pending value - // in the plan above instead. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Missing pending object in plan", - Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), - Subject: &config.DeclRange, - }) - continue - } - } - - ios, err := is.Current.Decode(ty) - if err != nil { - // This shouldn't happen, since by the time we get here - // we should've upgraded the state data already. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource instance data in state", - Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), - Subject: &config.DeclRange, - }) - continue - } - vals[string(sk)] = ios.Value - } - } - - // We use an object rather than a map here because resource schemas may - // include dynamically-typed attributes, which will then cause each - // instance to potentially have a different runtime type even though - // they all conform to the static schema. - return cty.ObjectVal(vals), diags - - default: - // Should never happen since caller should deal with other modes - panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) - } -} - -func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { - providerType := providerAddr.ProviderConfig.Type.LegacyString() - schemas := d.Evaluator.Schemas - schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) - return schema -} - -// coerceInstanceKey attempts to convert the given key to the type expected -// for the given EachMode. -// -// If the key is already of the correct type or if it cannot be converted then -// it is returned verbatim. If conversion is required and possible, the -// converted value is returned. Callers should not try to determine if -// conversion was possible, should instead just check if the result is of -// the expected type. -func (d *evaluationStateData) coerceInstanceKey(key addrs.InstanceKey, mode states.EachMode) addrs.InstanceKey { - if key == addrs.NoKey { - // An absent key can't be converted - return key - } - - switch mode { - case states.NoEach: - // No conversions possible at all - return key - case states.EachMap: - if intKey, isInt := key.(addrs.IntKey); isInt { - return addrs.StringKey(strconv.Itoa(int(intKey))) - } - return key - case states.EachList: - if strKey, isStr := key.(addrs.StringKey); isStr { - i, err := strconv.Atoi(string(strKey)) - if err != nil { - return key - } - return addrs.IntKey(i) - } - return key - default: - return key - } -} - -func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - switch addr.Name { - - case "workspace": - workspaceName := d.Evaluator.Meta.Env - return cty.StringVal(workspaceName), diags - - case "env": - // Prior to Terraform 0.12 there was an attribute "env", which was - // an alias name for "workspace". This was deprecated and is now - // removed. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was rename to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "terraform" attribute`, - Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. 
The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), - Subject: rng.ToHCL().Ptr(), - }) - return cty.DynamicVal, diags - } -} - -// nameSuggestion tries to find a name from the given slice of suggested names -// that is close to the given name and returns it if found. If no suggestion -// is close enough, returns the empty string. -// -// The suggestions are tried in order, so earlier suggestions take precedence -// if the given string is similar to two or more suggestions. -// -// This function is intended to be used with a relatively-small number of -// suggestions. It's not optimized for hundreds or thousands of them. -func nameSuggestion(given string, suggestions []string) string { - for _, suggestion := range suggestions { - dist := levenshtein.Distance(given, suggestion, nil) - if dist < 3 { // threshold determined experimentally - return suggestion - } - } - return "" -} - -// moduleDisplayAddr returns a string describing the given module instance -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. -func moduleDisplayAddr(addr addrs.ModuleInstance) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go deleted file mode 100644 index 01f5ca1d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go +++ /dev/null @@ -1,299 +0,0 @@ -package terraform - -import ( - "fmt" - "sort" - - "github.com/hashicorp/hcl/v2" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/helper/didyoumean" - "github.com/hashicorp/terraform/tfdiags" -) - -// StaticValidateReferences checks the given references against schemas and -// other statically-checkable rules, producing error diagnostics if any -// problems are found. -// -// If this method returns errors for a particular reference then evaluating -// that reference is likely to generate a very similar error, so callers should -// not run this method and then also evaluate the source expression(s) and -// merge the two sets of diagnostics together, since this will result in -// confusing redundant errors. -// -// This method can find more errors than can be found by evaluating an -// expression with a partially-populated scope, since it checks the referenced -// names directly against the schema rather than relying on evaluation errors. -// -// The result may include warning diagnostics if, for example, deprecated -// features are referenced. 
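The `nameSuggestion` helper removed above implements the "Did you mean ...?" hints seen in the diagnostics: any candidate name within a Levenshtein edit distance of 3 is offered as a suggestion. A small self-contained sketch of that pattern, assuming the `github.com/agext/levenshtein` package whose `Distance` function the deleted code calls (the `nearestName` wrapper and sample inputs are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

// nearestName returns the first candidate within an edit distance of 3 of the
// given name, the same threshold the removed nameSuggestion helper used.
func nearestName(given string, candidates []string) string {
	for _, candidate := range candidates {
		if levenshtein.Distance(given, candidate, nil) < 3 {
			return candidate
		}
	}
	return ""
}

func main() {
	fmt.Println(nearestName("regoin", []string{"region", "zone"})) // prints "region"
}
```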
-func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - for _, ref := range refs { - moreDiags := d.staticValidateReference(ref, self) - diags = diags.Append(moreDiags) - } - return diags -} - -func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { - modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) - if modCfg == nil { - // This is a bug in the caller rather than a problem with the - // reference, but rather than crashing out here in an unhelpful way - // we'll just ignore it and trust a different layer to catch it. - return nil - } - - if ref.Subject == addrs.Self { - // The "self" address is a special alias for the address given as - // our self parameter here, if present. - if self == nil { - var diags tfdiags.Diagnostics - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid "self" reference`, - // This detail message mentions some current practice that - // this codepath doesn't really "know about". If the "self" - // object starts being supported in more contexts later then - // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - return diags - } - - synthRef := *ref // shallow copy - synthRef.Subject = self - ref = &synthRef - } - - switch addr := ref.Subject.(type) { - - // For static validation we validate both resource and resource instance references the same way. - // We mostly disregard the index, though we do some simple validation of - // its _presence_ in staticValidateSingleResourceReference and - // staticValidateMultiResourceReference respectively. - case addrs.Resource: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - return diags - case addrs.ResourceInstance: - var diags tfdiags.Diagnostics - diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) - diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) - return diags - - // We also handle all module call references the same way, disregarding index. - case addrs.ModuleCall: - return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallInstance: - return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) - case addrs.ModuleCallOutput: - // This one is a funny one because we will take the output name referenced - // and use it to fake up a "remaining" that would make sense for the - // module call itself, rather than for the specific output, and then - // we can just re-use our static module call validation logic. - remain := make(hcl.Traversal, len(ref.Remaining)+1) - copy(remain[1:], ref.Remaining) - remain[0] = hcl.TraverseAttr{ - Name: addr.Name, - - // Using the whole reference as the source range here doesn't exactly - // match how HCL would normally generate an attribute traversal, - // but is close enough for our purposes. 
- SrcRange: ref.SourceRange.ToHCL(), - } - return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) - - default: - // Anything else we'll just permit through without any static validation - // and let it be caught during dynamic evaluation, in evaluate.go . - return nil - } -} - -func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - // If we have at least one step in "remain" and this resource has - // "count" set then we know for sure this in invalid because we have - // something like: - // aws_instance.foo.bar - // ...when we really need - // aws_instance.foo[count.index].bar - - // It is _not_ safe to do this check when remain is empty, because that - // would also match aws_instance.foo[count.index].bar due to `count.index` - // not being statically-resolvable as part of a reference, and match - // direct references to the whole aws_instance.foo tuple. - if len(remain) == 0 { - return nil - } - - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if cfg.Count != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - if cfg.ForEach != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Missing resource instance key`, - Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), - Subject: rng.ToHCL().Ptr(), - }) - } - - return diags -} - -func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) - if cfg == nil { - // We'll just bail out here and catch this in our subsequent call to - // staticValidateResourceReference, then. - return diags - } - - if addr.Key == addrs.NoKey { - // This is a different path into staticValidateSingleResourceReference - return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) - } else { - if cfg.Count == nil && cfg.ForEach == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Unexpected resource instance key`, - Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. 
Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), - Subject: rng.ToHCL().Ptr(), - }) - } - } - - return diags -} - -func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - var modeAdjective string - switch addr.Mode { - case addrs.ManagedResourceMode: - modeAdjective = "managed" - case addrs.DataResourceMode: - modeAdjective = "data" - default: - // should never happen - modeAdjective = "" - } - - cfg := modCfg.Module.ResourceByAddr(addr) - if cfg == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared resource`, - Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // Normally accessing this directly is wrong because it doesn't take into - // account provider inheritance, etc but it's okay here because we're only - // paying attention to the type anyway. - providerType := cfg.ProviderConfigAddr().Type.LegacyString() - schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) - - if schema == nil { - // Prior validation should've taken care of a resource block with an - // unsupported type, so we should never get here but we'll handle it - // here anyway for robustness. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource type`, - Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - // As a special case we'll detect attempts to access an attribute called - // "count" and produce a special error for it, since versions of Terraform - // prior to v0.12 offered this as a weird special case that we can no - // longer support. - if len(remain) > 0 { - if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Invalid resource count attribute`, - Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - } - - // If we got this far then we'll try to validate the remaining traversal - // steps against our schema. - moreDiags := schema.StaticValidateTraversal(remain) - diags = diags.Append(moreDiags) - - return diags -} - -func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - // For now, our focus here is just in testing that the referenced module - // call exists. All other validation is deferred until evaluation time. 
- _, exists := modCfg.Module.ModuleCalls[addr.Name] - if !exists { - var suggestions []string - for name := range modCfg.Module.ModuleCalls { - suggestions = append(suggestions, name) - } - sort.Strings(suggestions) - suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) - if suggestion != "" { - suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) - } - - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: `Reference to undeclared module`, - Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), - Subject: rng.ToHCL().Ptr(), - }) - return diags - } - - return diags -} - -// moduleConfigDisplayAddr returns a string describing the given module -// address that is appropriate for returning to users in situations where the -// root module is possible. Specifically, it returns "the root module" if the -// root module instance is given, or a string representation of the module -// address otherwise. -func moduleConfigDisplayAddr(addr addrs.Module) string { - switch { - case addr.IsRoot(): - return "the root module" - default: - return addr.String() - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go deleted file mode 100644 index 97c77bdb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/features.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -import "os" - -// This file holds feature flags for the next release - -var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go deleted file mode 100644 index 58d45a7b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ /dev/null @@ -1,141 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/terraform/dag" -) - -// Graph represents the graph that Terraform uses to represent resources -// and their dependencies. -type Graph struct { - // Graph is the actual DAG. This is embedded so you can call the DAG - // methods directly. - dag.AcyclicGraph - - // Path is the path in the module tree that this Graph represents. - Path addrs.ModuleInstance - - // debugName is a name for reference in the debug output. This is usually - // to indicate what topmost builder was, and if this graph is a shadow or - // not. - debugName string -} - -func (g *Graph) DirectedGraph() dag.Grapher { - return &g.AcyclicGraph -} - -// Walk walks the graph with the given walker for callbacks. The graph -// will be walked with full parallelism, so the walker should expect -// to be called in concurrently. -func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { - return g.walk(walker) -} - -func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { - // The callbacks for enter/exiting a graph - ctx := walker.EnterPath(g.Path) - defer walker.ExitPath(g.Path) - - // Get the path for logs - path := ctx.Path().String() - - debugName := "walk-graph.json" - if g.debugName != "" { - debugName = g.debugName + "-" + debugName - } - - // Walk the graph. 
- var walkFn dag.WalkFunc - walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { - log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) - g.DebugVisitInfo(v, g.debugName) - - defer func() { - log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) - }() - - walker.EnterVertex(v) - defer walker.ExitVertex(v, diags) - - // vertexCtx is the context that we use when evaluating. This - // is normally the context of our graph but can be overridden - // with a GraphNodeSubPath impl. - vertexCtx := ctx - if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { - vertexCtx = walker.EnterPath(pn.Path()) - defer walker.ExitPath(pn.Path()) - } - - // If the node is eval-able, then evaluate it. - if ev, ok := v.(GraphNodeEvalable); ok { - tree := ev.EvalTree() - if tree == nil { - panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) - } - - // Allow the walker to change our tree if needed. Eval, - // then callback with the output. - log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) - - tree = walker.EnterEvalTree(v, tree) - output, err := Eval(tree, vertexCtx) - diags = diags.Append(walker.ExitEvalTree(v, output, err)) - if diags.HasErrors() { - return - } - } - - // If the node is dynamically expanded, then expand it - if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) - - g, err := ev.DynamicExpand(vertexCtx) - if err != nil { - diags = diags.Append(err) - return - } - if g != nil { - // Walk the subgraph - log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) - subDiags := g.walk(walker) - diags = diags.Append(subDiags) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) - } else { - log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) - } - } - - // If the node has a subgraph, then walk the subgraph - if sn, ok := v.(GraphNodeSubgraph); ok { - log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) - - g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) - - subDiags := sn.Subgraph().(*Graph).walk(walker) - if subDiags.HasErrors() { - log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) - return - } - log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) - } - - return - } - - return g.AcyclicGraph.Walk(walkFn) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go deleted file mode 100644 index 71b71b75..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go +++ /dev/null @@ -1,85 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/tfdiags" -) - -// GraphBuilder is an interface that can be implemented and used with -// Terraform to build the graph that Terraform walks. -type GraphBuilder interface { - // Build builds the graph for the given module path. 
It is up to - // the interface implementation whether this build should expand - // the graph or not. - Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) -} - -// BasicGraphBuilder is a GraphBuilder that builds a graph out of a -// series of transforms and (optionally) validates the graph is a valid -// structure. -type BasicGraphBuilder struct { - Steps []GraphTransformer - Validate bool - // Optional name to add to the graph debug log - Name string -} - -func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - g := &Graph{Path: path} - - var lastStepStr string - for _, step := range b.Steps { - if step == nil { - continue - } - log.Printf("[TRACE] Executing graph transform %T", step) - - stepName := fmt.Sprintf("%T", step) - dot := strings.LastIndex(stepName, ".") - if dot >= 0 { - stepName = stepName[dot+1:] - } - - debugOp := g.DebugOperation(stepName, "") - err := step.Transform(g) - - errMsg := "" - if err != nil { - errMsg = err.Error() - } - debugOp.End(errMsg) - - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s ------", step, logging.Indent(thisStepStr)) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] Completed graph transform %T (no changes)", step) - } - - if err != nil { - if nf, isNF := err.(tfdiags.NonFatalError); isNF { - diags = diags.Append(nf.Diagnostics) - } else { - diags = diags.Append(err) - return g, diags - } - } - } - - // Validate the graph structure - if b.Validate { - if err := g.Validate(); err != nil { - log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) - diags = diags.Append(err) - return nil, diags - } - } - - return g, diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go deleted file mode 100644 index 00ddcf5c..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ /dev/null @@ -1,215 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ApplyGraphBuilder implements GraphBuilder and is responsible for building -// a graph for applying a Terraform diff. -// -// Because the graph is built from the diff (vs. the config or state), -// this helps ensure that the apply-time graph doesn't modify any resources -// that aren't explicitly in the diff. There are other scenarios where the -// diff can be deviated, so this is just one layer of protection. -type ApplyGraphBuilder struct { - // Config is the configuration tree that the diff was built from. - Config *configs.Config - - // Changes describes the changes that we need apply. - Changes *plans.Changes - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target. This is only required to make sure - // unnecessary outputs aren't included in the apply graph. The plan - // builder successfully handles targeting resources. 
In the future, - // outputs should go into the diff so that this is unnecessary. - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Destroy, if true, represents a pure destroy operation - Destroy bool - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "ApplyGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *ApplyGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeApplyableResource{ - NodeAbstractResource: a, - } - } - - concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeDestroyResource{ - NodeAbstractResource: a, - } - } - - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeApplyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config. During apply, - // we use this just to ensure that the whole-resource metadata is - // updated to reflect things such as whether the count argument is - // set in config, or which provider configuration manages each resource. - &ConfigTransformer{ - Concrete: concreteResource, - Config: b.Config, - }, - - // Creates all the resource instances represented in the diff, along - // with dependency edges against the whole-resource nodes added by - // ConfigTransformer above. - &DiffTransformer{ - Concrete: concreteResourceInstance, - State: b.State, - Changes: b.Changes, - }, - - // Creates extra cleanup nodes for any entire resources that are - // no longer present in config, so we can make sure we clean up the - // leftover empty resource states after the instances have been - // destroyed. - // (We don't track this particular type of change in the plan because - // it's just cleanup of our own state object, and so doesn't effect - // any real remote objects or consumable outputs.) - &OrphanResourceTransformer{ - Concrete: concreteOrphanResource, - Config: b.Config, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{Config: b.Config, State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // add providers - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. 
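Each graph builder deleted in this hunk (apply, destroy-plan, eval, import, plan, refresh) is essentially a named list of GraphTransformer steps that BasicGraphBuilder applies in order, validating the result at the end. A toy sketch of that step-pipeline shape, with every type and function name invented for illustration rather than taken from Terraform's real packages:

```go
package main

import "fmt"

// graph stands in for Terraform's *Graph; only enough is modeled to show the
// builder pattern.
type graph struct {
	nodes []string
}

// graphTransformer mirrors the GraphTransformer contract the removed builders
// rely on: each step mutates the graph in place and may fail.
type graphTransformer interface {
	Transform(*graph) error
}

type addNode struct{ name string }

func (t addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

// build applies the steps in order, the same overall shape as the deleted
// BasicGraphBuilder.Build (minus logging, debug operations, and validation).
func build(steps []graphTransformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if step == nil {
			continue
		}
		if err := step.Transform(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	g, err := build([]graphTransformer{addNode{"provider"}, addNode{"resource"}})
	fmt.Println(g.nodes, err) // [provider resource] <nil>
}
```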
- &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - &AttachDependenciesTransformer{}, - - // Destruction ordering - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - &CBDEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - Destroy: b.Destroy, - }, - - // Handle destroy time transformations for output and local values. - // Reverse the edges from outputs and locals, so that - // interpolations don't fail during destroy. - // Create a destroy node for outputs to remove them from the state. - GraphTransformIf( - func() bool { return b.Destroy }, - GraphTransformMulti( - &DestroyValueReferenceTransformer{}, - &DestroyOutputTransformer{}, - ), - ), - - // Prune unreferenced values, which may have interpolations that can't - // be resolved. - &PruneUnusedValuesTransformer{ - Destroy: b.Destroy, - }, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{Targets: b.Targets}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go deleted file mode 100644 index a6047a9b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go +++ /dev/null @@ -1,97 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for -// planning a pure-destroy. -// -// Planning a pure destroy operation is simple because we can ignore most -// ordering configuration and simply reverse the state. -type DestroyPlanGraphBuilder struct { - // Config is the configuration tree to build the plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // Validate will do structural validation of the graph. 
- Validate bool -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "DestroyPlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlanDestroyableResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates nodes for the resource instances tracked in the state. - &StateTransformer{ - ConcreteCurrent: concreteResourceInstance, - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Destruction ordering. We require this only so that - // targeting below will prune the correct things. - &DestroyEdgeTransformer{ - Config: b.Config, - State: b.State, - Schemas: b.Schemas, - }, - - // Target. Note we don't set "Destroy: true" here since we already - // created proper destroy ordering. - &TargetsTransformer{Targets: b.Targets}, - - // Single root - &RootTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go deleted file mode 100644 index eb6c897b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go +++ /dev/null @@ -1,108 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable -// for evaluating in-memory values (input variables, local values, output -// values) in the state without any other side-effects. -// -// This graph is used only in weird cases, such as the "terraform console" -// CLI command, where we need to evaluate expressions against the state -// without taking any other actions. -// -// The generated graph will include nodes for providers, resources, etc -// just to allow indirect dependencies to be resolved, but these nodes will -// not take any actions themselves since we assume that their parts of the -// state, if any, are already complete. -// -// Although the providers are never configured, they must still be available -// in order to obtain schema information used for type checking, etc. -type EvalGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. 
- Schemas *Schemas -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "EvalGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *EvalGraphBuilder) Steps() []GraphTransformer { - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeEvalableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Concrete: nil, // just use the abstract type - Config: b.Config, - Unique: true, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Although we don't configure providers, we do still start them up - // to get their schemas, and so we must shut them down again here. - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - - // Remove redundant edges to simplify the graph. - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go deleted file mode 100644 index 49879e4e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportGraphBuilder implements GraphBuilder and is responsible for building -// a graph for importing resources into Terraform. This is a much, much -// simpler graph than a normal configuration graph. -type ImportGraphBuilder struct { - // ImportTargets are the list of resources to import. - ImportTargets []*ImportTarget - - // Module is a configuration to build the graph from. See ImportOpts.Config. - Config *configs.Config - - // Components is the factory for our available plugin components. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas -} - -// Build builds the graph according to the steps returned by Steps. -func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: true, - Name: "ImportGraphBuilder", - }).Build(path) -} - -// Steps returns the ordered list of GraphTransformers that must be executed -// to build a complete graph. 
-func (b *ImportGraphBuilder) Steps() []GraphTransformer { - // Get the module. If we don't have one, we just use an empty tree - // so that the transform still works but does nothing. - config := b.Config - if config == nil { - config = configs.NewEmptyConfig() - } - - // Custom factory for creating providers. - concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - steps := []GraphTransformer{ - // Create all our resources from the configuration and state - &ConfigTransformer{Config: config}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add the import steps - &ImportStateTransformer{Targets: b.ImportTargets}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), - - // This validates that the providers only depend on variables - &ImportProviderValidateTransformer{}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - - // Optimize - &TransitiveReductionTransformer{}, - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go deleted file mode 100644 index 17adfd27..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ /dev/null @@ -1,204 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// PlanGraphBuilder implements GraphBuilder and is responsible for building -// a graph for planning (creating a Terraform Diff). -// -// The primary difference between this graph and others: -// -// * Based on the config since it represents the target state -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type PlanGraphBuilder struct { - // Config is the configuration tree to build a plan from. - Config *configs.Config - - // State is the current state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. 
- Validate bool - - // CustomConcrete can be set to customize the node types created - // for various parts of the plan. This is useful in order to customize - // the plan behavior. - CustomConcrete bool - ConcreteProvider ConcreteProviderNodeFunc - ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc - - once sync.Once -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "PlanGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *PlanGraphBuilder) Steps() []GraphTransformer { - b.once.Do(b.init) - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - steps := []GraphTransformer{ - // Creates all the resources represented in the config - &ConfigTransformer{ - Concrete: b.ConcreteResource, - Config: b.Config, - }, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add orphan resources - &OrphanResourceInstanceTransformer{ - Concrete: b.ConcreteResourceOrphan, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can plan to destroy them. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) - &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Create orphan output nodes - &OrphanOutputTransformer{ - Config: b.Config, - State: b.State, - }, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - - // Add module variables - &ModuleVariableTransformer{ - Config: b.Config, - }, - - TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), - - // Remove modules no longer present in the config - &RemovedModuleTransformer{Config: b.Config, State: b.State}, - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - - // Add the node to fix the state count boundaries - &CountBoundaryTransformer{ - Config: b.Config, - }, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Detect when create_before_destroy must be forced on for a particular - // node due to dependency edges, to avoid graph cycles during apply. 
- &ForcedCBDTransformer{}, - - // Close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} - -func (b *PlanGraphBuilder) init() { - // Do nothing if the user requests customizing the fields - if b.CustomConcrete { - return - } - - b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResource{ - NodeAbstractResource: a, - } - } - - b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go deleted file mode 100644 index 1c7ae489..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ /dev/null @@ -1,195 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// RefreshGraphBuilder implements GraphBuilder and is responsible for building -// a graph for refreshing (updating the Terraform state). -// -// The primary difference between this graph and others: -// -// * Based on the state since it represents the only resources that -// need to be refreshed. -// -// * Ignores lifecycle options since no lifecycle events occur here. This -// simplifies the graph significantly since complex transforms such as -// create-before-destroy can be completely ignored. -// -type RefreshGraphBuilder struct { - // Config is the configuration tree. - Config *configs.Config - - // State is the prior state - State *states.State - - // Components is a factory for the plug-in components (providers and - // provisioners) available for use. - Components contextComponentFactory - - // Schemas is the repository of schemas we will draw from to analyse - // the configuration. - Schemas *Schemas - - // Targets are resources to target - Targets []addrs.Targetable - - // DisableReduce, if true, will not reduce the graph. Great for testing. - DisableReduce bool - - // Validate will do structural validation of the graph. - Validate bool -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { - return (&BasicGraphBuilder{ - Steps: b.Steps(), - Validate: b.Validate, - Name: "RefreshGraphBuilder", - }).Build(path) -} - -// See GraphBuilder -func (b *RefreshGraphBuilder) Steps() []GraphTransformer { - // Custom factory for creating providers. 
- concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableManagedResource{ - NodeAbstractResource: a, - } - } - - concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { - // The "Plan" node type also handles refreshing behavior. - return &NodePlanDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: a, - DeposedKey: key, - } - } - - concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableDataResource{ - NodeAbstractResource: a, - } - } - - steps := []GraphTransformer{ - // Creates all the managed resources that aren't in the state, but only if - // we have a state already. No resources in state means there's not - // anything to refresh. - func() GraphTransformer { - if b.State.HasResources() { - return &ConfigTransformer{ - Concrete: concreteManagedResource, - Config: b.Config, - Unique: true, - ModeFilter: true, - Mode: addrs.ManagedResourceMode, - } - } - log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer") - return nil - }(), - - // Creates all the data resources that aren't in the state. This will also - // add any orphans from scaling in as destroy nodes. - &ConfigTransformer{ - Concrete: concreteDataResource, - Config: b.Config, - Unique: true, - ModeFilter: true, - Mode: addrs.DataResourceMode, - }, - - // Add any fully-orphaned resources from config (ones that have been - // removed completely, not ones that are just orphaned due to a scaled-in - // count. - &OrphanResourceInstanceTransformer{ - Concrete: concreteManagedResourceInstance, - State: b.State, - Config: b.Config, - }, - - // We also need nodes for any deposed instance objects present in the - // state, so we can check if they still exist. (This intentionally - // skips creating nodes for _current_ objects, since ConfigTransformer - // created nodes that will do that during DynamicExpand.) - &StateTransformer{ - ConcreteDeposed: concreteResourceInstanceDeposed, - State: b.State, - }, - - // Attach the state - &AttachStateTransformer{State: b.State}, - - // Attach the configuration to any resources - &AttachResourceConfigTransformer{Config: b.Config}, - - // Add root variables - &RootVariableTransformer{Config: b.Config}, - - // Add the local values - &LocalTransformer{Config: b.Config}, - - // Add the outputs - &OutputTransformer{Config: b.Config}, - - // Add module variables - &ModuleVariableTransformer{Config: b.Config}, - - TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: b.Schemas}, - - // Connect so that the references are ready for targeting. We'll - // have to connect again later for providers and so on. - &ReferenceTransformer{}, - &AttachDependenciesTransformer{}, - - // Target - &TargetsTransformer{ - Targets: b.Targets, - - // Resource nodes from config have not yet been expanded for - // "count", so we must apply targeting without indices. 
Exact - // targeting will be dealt with later when these resources - // DynamicExpand. - IgnoreIndices: true, - }, - - // Close opened plugin connections - &CloseProviderTransformer{}, - - // Single root - &RootTransformer{}, - } - - if !b.DisableReduce { - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - steps = append(steps, &TransitiveReductionTransformer{}) - } - - return steps -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go deleted file mode 100644 index 1881f95f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go +++ /dev/null @@ -1,34 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// ValidateGraphBuilder creates the graph for the validate operation. -// -// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that -// we only have to validate what we'd normally plan anyways. The -// PlanGraphBuilder given will be modified so it shouldn't be used for anything -// else after calling this function. -func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { - // We're going to customize the concrete functions - p.CustomConcrete = true - - // Set the provider to the normal provider. This will ask for input. - p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{ - NodeAbstractProvider: a, - } - } - - p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { - return &NodeValidatableResource{ - NodeAbstractResource: a, - } - } - - // We purposely don't set any other concrete types since they don't - // require validation. - - return p -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go deleted file mode 100644 index 73e3821f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform/dag" - -// GraphDot returns the dot formatting of a visual representation of -// the given Terraform graph. -func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { - return string(g.Dot(opts)), nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go deleted file mode 100644 index 768590fb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" -) - -// GraphNodeSubPath says that a node is part of a graph with a -// different path, and the context should be adjusted accordingly. -type GraphNodeSubPath interface { - Path() addrs.ModuleInstance -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go deleted file mode 100644 index e980e0c6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go +++ /dev/null @@ -1,32 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// GraphWalker is an interface that can be implemented that when used -// with Graph.Walk will invoke the given callbacks under certain events. 
-type GraphWalker interface { - EnterPath(addrs.ModuleInstance) EvalContext - ExitPath(addrs.ModuleInstance) - EnterVertex(dag.Vertex) - ExitVertex(dag.Vertex, tfdiags.Diagnostics) - EnterEvalTree(dag.Vertex, EvalNode) EvalNode - ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics -} - -// NullGraphWalker is a GraphWalker implementation that does nothing. -// This can be embedded within other GraphWalker implementations for easily -// implementing all the required functions. -type NullGraphWalker struct{} - -func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} -func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} -func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go deleted file mode 100644 index 03c192a8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ /dev/null @@ -1,157 +0,0 @@ -package terraform - -import ( - "context" - "log" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ContextGraphWalker is the GraphWalker implementation used with the -// Context struct to walk and evaluate the graph. -type ContextGraphWalker struct { - NullGraphWalker - - // Configurable values - Context *Context - State *states.SyncState // Used for safe concurrent access to state - Changes *plans.ChangesSync // Used for safe concurrent writes to changes - Operation walkOperation - StopContext context.Context - RootVariableValues InputValues - - // This is an output. Do not set this, nor read it while a graph walk - // is in progress. - NonFatalDiagnostics tfdiags.Diagnostics - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - variableValues map[string]map[string]cty.Value - variableValuesLock sync.Mutex - providerCache map[string]providers.Interface - providerSchemas map[string]*ProviderSchema - providerLock sync.Mutex - provisionerCache map[string]provisioners.Interface - provisionerSchemas map[string]*configschema.Block - provisionerLock sync.Mutex -} - -func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { - w.once.Do(w.init) - - w.contextLock.Lock() - defer w.contextLock.Unlock() - - // If we already have a context for this path cached, use that - key := path.String() - if ctx, ok := w.contexts[key]; ok { - return ctx - } - - // Our evaluator shares some locks with the main context and the walker - // so that we can safely run multiple evaluations at once across - // different modules. 
- evaluator := &Evaluator{ - Meta: w.Context.meta, - Config: w.Context.config, - Operation: w.Operation, - State: w.State, - Changes: w.Changes, - Schemas: w.Context.schemas, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - ctx := &BuiltinEvalContext{ - StopContext: w.StopContext, - PathValue: path, - Hooks: w.Context.hooks, - InputValue: w.Context.uiInput, - Components: w.Context.components, - Schemas: w.Context.schemas, - ProviderCache: w.providerCache, - ProviderInputConfig: w.Context.providerInputConfig, - ProviderLock: &w.providerLock, - ProvisionerCache: w.provisionerCache, - ProvisionerLock: &w.provisionerLock, - ChangesValue: w.Changes, - StateValue: w.State, - Evaluator: evaluator, - VariableValues: w.variableValues, - VariableValuesLock: &w.variableValuesLock, - } - - w.contexts[key] = ctx - return ctx -} - -func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) - - // Acquire a lock on the semaphore - w.Context.parallelSem.Acquire() - - // We want to filter the evaluation tree to only include operations - // that belong in this operation. - return EvalFilter(n, EvalNodeFilterOp(w.Operation)) -} - -func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) - - // Release the semaphore - w.Context.parallelSem.Release() - - if err == nil { - return nil - } - - // Acquire the lock because anything is going to require a lock. - w.errorLock.Lock() - defer w.errorLock.Unlock() - - // If the error is non-fatal then we'll accumulate its diagnostics in our - // non-fatal list, rather than returning it directly, so that the graph - // walk can continue. - if nferr, ok := err.(tfdiags.NonFatalError); ok { - log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) - w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) - return nil - } - - // Otherwise, we'll let our usual diagnostics machinery figure out how to - // unpack this as one or more diagnostic messages and return that. If we - // get down here then the returned diagnostics will contain at least one - // error, causing the graph walk to halt. - var diags tfdiags.Diagnostics - diags = diags.Append(err) - return diags -} - -func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext) - w.providerCache = make(map[string]providers.Interface) - w.providerSchemas = make(map[string]*ProviderSchema) - w.provisionerCache = make(map[string]provisioners.Interface) - w.provisionerSchemas = make(map[string]*configschema.Block) - w.variableValues = make(map[string]map[string]cty.Value) - - // Populate root module variable values. Other modules will be populated - // during the graph walk. - w.variableValues[""] = make(map[string]cty.Value) - for k, iv := range w.RootVariableValues { - w.variableValues[""][k] = iv.Value - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go deleted file mode 100644 index 859f6fb1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go +++ /dev/null @@ -1,18 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go - -// walkOperation is an enum which tells the walkContext what to do. 
-type walkOperation byte - -const ( - walkInvalid walkOperation = iota - walkApply - walkPlan - walkPlanDestroy - walkRefresh - walkValidate - walkDestroy - walkImport - walkEval // used just to prepare EvalContext for expression evaluation, with no other actions -) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go deleted file mode 100644 index b51e1a26..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[GraphTypeInvalid-0] - _ = x[GraphTypeLegacy-1] - _ = x[GraphTypeRefresh-2] - _ = x[GraphTypePlan-3] - _ = x[GraphTypePlanDestroy-4] - _ = x[GraphTypeApply-5] - _ = x[GraphTypeValidate-6] - _ = x[GraphTypeEval-7] -} - -const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval" - -var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124} - -func (i GraphType) String() string { - if i >= GraphType(len(_GraphType_index)-1) { - return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go deleted file mode 100644 index c0bb23ab..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook.go +++ /dev/null @@ -1,161 +0,0 @@ -package terraform - -import ( - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// HookAction is an enum of actions that can be taken as a result of a hook -// callback. This allows you to modify the behavior of Terraform at runtime. -type HookAction byte - -const ( - // HookActionContinue continues with processing as usual. - HookActionContinue HookAction = iota - - // HookActionHalt halts immediately: no more hooks are processed - // and the action that Terraform was about to take is cancelled. - HookActionHalt -) - -// Hook is the interface that must be implemented to hook into various -// parts of Terraform, allowing you to inspect or change behavior at runtime. -// -// There are MANY hook points into Terraform. If you only want to implement -// some hook points, but not all (which is the likely case), then embed the -// NilHook into your struct, which implements all of the interface but does -// nothing. Then, override only the functions you want to implement. -type Hook interface { - // PreApply and PostApply are called before and after an action for a - // single instance is applied. The error argument in PostApply is the - // error, if any, that was returned from the provider Apply call itself. 
- PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) - - // PreDiff and PostDiff are called before and after a provider is given - // the opportunity to customize the proposed new state to produce the - // planned new state. - PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) - PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) - - // The provisioning hooks signal both the overall start end end of - // provisioning for a particular instance and of each of the individual - // configured provisioners for each instance. The sequence of these - // for a given instance might look something like this: - // - // PreProvisionInstance(aws_instance.foo[1], ...) - // PreProvisionInstanceStep(aws_instance.foo[1], "file") - // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) - // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") - // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") - // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) - // PostProvisionInstance(aws_instance.foo[1], ...) - // - // ProvisionOutput is called with output sent back by the provisioners. - // This will be called multiple times as output comes in, with each call - // representing one line of output. It cannot control whether the - // provisioner continues running. - PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) - PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) - PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) - ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) - - // PreRefresh and PostRefresh are called before and after a single - // resource state is refreshed, respectively. - PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) - PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) - - // PreImportState and PostImportState are called before and after - // (respectively) each state import operation for a given resource address. - PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) - PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) - - // PostStateUpdate is called each time the state is updated. It receives - // a deep copy of the state, which it may therefore access freely without - // any need for locks to protect from concurrent writes from the caller. - PostStateUpdate(new *states.State) (HookAction, error) -} - -// NilHook is a Hook implementation that does nothing. It exists only to -// simplify implementing hooks. You can embed this into your Hook implementation -// and only implement the functions you are interested in. 
-type NilHook struct{} - -var _ Hook = (*NilHook)(nil) - -func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - return HookActionContinue, nil -} - -func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { - return HookActionContinue, nil -} - -// handleHook turns hook actions into panics. This lets you use the -// panic/recover mechanism in Go as a flow control mechanism for hook -// actions. -func handleHook(a HookAction, err error) { - if err != nil { - // TODO: handle errors - } - - switch a { - case HookActionContinue: - return - case HookActionHalt: - panic(HookActionHalt) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go deleted file mode 100644 index 6efa3196..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go +++ /dev/null @@ -1,274 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// MockHook is an implementation of Hook that can be used for tests. -// It records all of its function calls. 
-type MockHook struct { - sync.Mutex - - PreApplyCalled bool - PreApplyAddr addrs.AbsResourceInstance - PreApplyGen states.Generation - PreApplyAction plans.Action - PreApplyPriorState cty.Value - PreApplyPlannedState cty.Value - PreApplyReturn HookAction - PreApplyError error - - PostApplyCalled bool - PostApplyAddr addrs.AbsResourceInstance - PostApplyGen states.Generation - PostApplyNewState cty.Value - PostApplyError error - PostApplyReturn HookAction - PostApplyReturnError error - PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) - - PreDiffCalled bool - PreDiffAddr addrs.AbsResourceInstance - PreDiffGen states.Generation - PreDiffPriorState cty.Value - PreDiffProposedState cty.Value - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffAddr addrs.AbsResourceInstance - PostDiffGen states.Generation - PostDiffAction plans.Action - PostDiffPriorState cty.Value - PostDiffPlannedState cty.Value - PostDiffReturn HookAction - PostDiffError error - - PreProvisionInstanceCalled bool - PreProvisionInstanceAddr addrs.AbsResourceInstance - PreProvisionInstanceState cty.Value - PreProvisionInstanceReturn HookAction - PreProvisionInstanceError error - - PostProvisionInstanceCalled bool - PostProvisionInstanceAddr addrs.AbsResourceInstance - PostProvisionInstanceState cty.Value - PostProvisionInstanceReturn HookAction - PostProvisionInstanceError error - - PreProvisionInstanceStepCalled bool - PreProvisionInstanceStepAddr addrs.AbsResourceInstance - PreProvisionInstanceStepProvisionerType string - PreProvisionInstanceStepReturn HookAction - PreProvisionInstanceStepError error - - PostProvisionInstanceStepCalled bool - PostProvisionInstanceStepAddr addrs.AbsResourceInstance - PostProvisionInstanceStepProvisionerType string - PostProvisionInstanceStepErrorArg error - PostProvisionInstanceStepReturn HookAction - PostProvisionInstanceStepError error - - ProvisionOutputCalled bool - ProvisionOutputAddr addrs.AbsResourceInstance - ProvisionOutputProvisionerType string - ProvisionOutputMessage string - - PreRefreshCalled bool - PreRefreshAddr addrs.AbsResourceInstance - PreRefreshGen states.Generation - PreRefreshPriorState cty.Value - PreRefreshReturn HookAction - PreRefreshError error - - PostRefreshCalled bool - PostRefreshAddr addrs.AbsResourceInstance - PostRefreshGen states.Generation - PostRefreshPriorState cty.Value - PostRefreshNewState cty.Value - PostRefreshReturn HookAction - PostRefreshError error - - PreImportStateCalled bool - PreImportStateAddr addrs.AbsResourceInstance - PreImportStateID string - PreImportStateReturn HookAction - PreImportStateError error - - PostImportStateCalled bool - PostImportStateAddr addrs.AbsResourceInstance - PostImportStateNewStates []providers.ImportedResource - PostImportStateReturn HookAction - PostImportStateError error - - PostStateUpdateCalled bool - PostStateUpdateState *states.State - PostStateUpdateReturn HookAction - PostStateUpdateError error -} - -var _ Hook = (*MockHook)(nil) - -func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreApplyCalled = true - h.PreApplyAddr = addr - h.PreApplyGen = gen - h.PreApplyAction = action - h.PreApplyPriorState = priorState - h.PreApplyPlannedState = plannedNewState - return h.PreApplyReturn, h.PreApplyError -} - -func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen 
states.Generation, newState cty.Value, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostApplyCalled = true - h.PostApplyAddr = addr - h.PostApplyGen = gen - h.PostApplyNewState = newState - h.PostApplyError = err - - if h.PostApplyFn != nil { - return h.PostApplyFn(addr, gen, newState, err) - } - - return h.PostApplyReturn, h.PostApplyReturnError -} - -func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreDiffCalled = true - h.PreDiffAddr = addr - h.PreDiffGen = gen - h.PreDiffPriorState = priorState - h.PreDiffProposedState = proposedNewState - return h.PreDiffReturn, h.PreDiffError -} - -func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostDiffCalled = true - h.PostDiffAddr = addr - h.PostDiffGen = gen - h.PostDiffAction = action - h.PostDiffPriorState = priorState - h.PostDiffPlannedState = plannedNewState - return h.PostDiffReturn, h.PostDiffError -} - -func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceCalled = true - h.PreProvisionInstanceAddr = addr - h.PreProvisionInstanceState = state - return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError -} - -func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceCalled = true - h.PostProvisionInstanceAddr = addr - h.PostProvisionInstanceState = state - return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError -} - -func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreProvisionInstanceStepCalled = true - h.PreProvisionInstanceStepAddr = addr - h.PreProvisionInstanceStepProvisionerType = typeName - return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError -} - -func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostProvisionInstanceStepCalled = true - h.PostProvisionInstanceStepAddr = addr - h.PostProvisionInstanceStepProvisionerType = typeName - h.PostProvisionInstanceStepErrorArg = err - return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError -} - -func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { - h.Lock() - defer h.Unlock() - - h.ProvisionOutputCalled = true - h.ProvisionOutputAddr = addr - h.ProvisionOutputProvisionerType = typeName - h.ProvisionOutputMessage = line -} - -func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreRefreshCalled = true - h.PreRefreshAddr = addr - h.PreRefreshGen = gen - h.PreRefreshPriorState = priorState - return h.PreRefreshReturn, h.PreRefreshError -} - -func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostRefreshCalled = true - h.PostRefreshAddr = addr - h.PostRefreshPriorState = priorState - h.PostRefreshNewState = newState - 
return h.PostRefreshReturn, h.PostRefreshError -} - -func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PreImportStateCalled = true - h.PreImportStateAddr = addr - h.PreImportStateID = importID - return h.PreImportStateReturn, h.PreImportStateError -} - -func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostImportStateCalled = true - h.PostImportStateAddr = addr - h.PostImportStateNewStates = imported - return h.PostImportStateReturn, h.PostImportStateError -} - -func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { - h.Lock() - defer h.Unlock() - - h.PostStateUpdateCalled = true - h.PostStateUpdateState = new - return h.PostStateUpdateReturn, h.PostStateUpdateError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go deleted file mode 100644 index 811fb337..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go +++ /dev/null @@ -1,100 +0,0 @@ -package terraform - -import ( - "sync/atomic" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// stopHook is a private Hook implementation that Terraform uses to -// signal when to stop or cancel actions. -type stopHook struct { - stop uint32 -} - -var _ Hook = (*stopHook)(nil) - -func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { -} - -func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported 
[]providers.ImportedResource) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { - return h.hook() -} - -func (h *stopHook) hook() (HookAction, error) { - if h.Stopped() { - // FIXME: This should really return an error since stopping partway - // through is not a successful run-to-completion, but we'll need to - // introduce that cautiously since existing automation solutions may - // be depending on this behavior. - return HookActionHalt, nil - } - - return HookActionContinue, nil -} - -// reset should be called within the lock context -func (h *stopHook) Reset() { - atomic.StoreUint32(&h.stop, 0) -} - -func (h *stopHook) Stop() { - atomic.StoreUint32(&h.stop, 1) -} - -func (h *stopHook) Stopped() bool { - return atomic.LoadUint32(&h.stop) == 1 -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go deleted file mode 100644 index 375a8638..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go +++ /dev/null @@ -1,13 +0,0 @@ -package terraform - -//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go - -// InstanceType is an enum of the various types of instances store in the State -type InstanceType int - -const ( - TypeInvalid InstanceType = iota - TypePrimary - TypeTainted - TypeDeposed -) diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go deleted file mode 100644 index 95b7a980..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[TypeInvalid-0] - _ = x[TypePrimary-1] - _ = x[TypeTainted-2] - _ = x[TypeDeposed-3] -} - -const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" - -var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} - -func (i InstanceType) String() string { - if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go deleted file mode 100644 index 0eecf9f4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go +++ /dev/null @@ -1,201 +0,0 @@ -package terraform - -import ( - version "github.com/hashicorp/go-version" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/moduledeps" - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/states" -) - -// ConfigTreeDependencies returns the dependencies of the tree of modules -// described by the given configuration and state. -// -// Both configuration and state are required because there can be resources -// implied by instances in the state that no longer exist in config. 
-func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { - // First we walk the configuration tree to build the overall structure - // and capture the explicit/implicit/inherited provider dependencies. - deps := configTreeConfigDependencies(root, nil) - - // Next we walk over the resources in the state to catch any additional - // dependencies created by existing resources that are no longer in config. - // Most things we find in state will already be present in 'deps', but - // we're interested in the rare thing that isn't. - configTreeMergeStateDependencies(deps, state) - - return deps -} - -func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { - if root == nil { - // If no config is provided, we'll make a synthetic root. - // This isn't necessarily correct if we're called with a nil that - // *isn't* at the root, but in practice that can never happen. - return &moduledeps.Module{ - Name: "root", - Providers: make(moduledeps.Providers), - } - } - - name := "root" - if len(root.Path) != 0 { - name = root.Path[len(root.Path)-1] - } - - ret := &moduledeps.Module{ - Name: name, - } - - module := root.Module - - // Provider dependencies - { - providers := make(moduledeps.Providers) - - // The main way to declare a provider dependency is explicitly inside - // the "terraform" block, which allows declaring a requirement without - // also creating a configuration. - for fullName, req := range module.ProviderRequirements { - inst := moduledeps.ProviderInstance(fullName) - // The handling here is a bit fiddly because the moduledeps package - // was designed around the legacy (pre-0.12) configuration model - // and hasn't yet been revised to handle the new model. As a result, - // we need to do some translation here. - // FIXME: Eventually we should adjust the underlying model so we - // can also retain the source location of each constraint, for - // more informative output from the "terraform providers" command. - var rawConstraints version.Constraints - for _, constraint := range req.VersionConstraints { - rawConstraints = append(rawConstraints, constraint.Required...) - } - discoConstraints := discovery.NewConstraints(rawConstraints) - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discoConstraints, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - - // Provider configurations can also include version constraints, - // allowing for more terse declaration in situations where both a - // configuration and a constraint are defined in the same module. - for fullName, pCfg := range module.ProviderConfigs { - inst := moduledeps.ProviderInstance(fullName) - discoConstraints := discovery.AllVersions - if pCfg.Version.Required != nil { - discoConstraints = discovery.NewConstraints(pCfg.Version.Required) - } - if existing, exists := providers[inst]; exists { - existing.Constraints = existing.Constraints.Append(discoConstraints) - } else { - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discoConstraints, - Reason: moduledeps.ProviderDependencyExplicit, - } - } - } - - // Each resource in the configuration creates an *implicit* provider - // dependency, though we'll only record it if there isn't already - // an explicit dependency on the same provider. 
- for _, rc := range module.ManagedResources { - addr := rc.ProviderConfigAddr() - inst := moduledeps.ProviderInstance(addr.StringCompact()) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[addr.StringCompact()]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - for _, rc := range module.DataResources { - addr := rc.ProviderConfigAddr() - inst := moduledeps.ProviderInstance(addr.StringCompact()) - if _, exists := providers[inst]; exists { - // Explicit dependency already present - continue - } - - reason := moduledeps.ProviderDependencyImplicit - if _, inherited := inheritProviders[addr.String()]; inherited { - reason = moduledeps.ProviderDependencyInherited - } - - providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: reason, - } - } - - ret.Providers = providers - } - - childInherit := make(map[string]*configs.Provider) - for k, v := range inheritProviders { - childInherit[k] = v - } - for k, v := range module.ProviderConfigs { - childInherit[k] = v - } - for _, c := range root.Children { - ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) - } - - return ret -} - -func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { - if state == nil { - return - } - - findModule := func(path addrs.ModuleInstance) *moduledeps.Module { - module := root - for _, step := range path { - var next *moduledeps.Module - for _, cm := range module.Children { - if cm.Name == step.Name { - next = cm - break - } - } - - if next == nil { - // If we didn't find a next node, we'll need to make one - next = &moduledeps.Module{ - Name: step.Name, - Providers: make(moduledeps.Providers), - } - module.Children = append(module.Children, next) - } - - module = next - } - return module - } - - for _, ms := range state.Modules { - module := findModule(ms.Addr) - - for _, rs := range ms.Resources { - inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) - if _, exists := module.Providers[inst]; !exists { - module.Providers[inst] = moduledeps.ProviderDependency{ - Constraints: discovery.AllVersions, - Reason: moduledeps.ProviderDependencyFromState, - } - } - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go deleted file mode 100644 index e4952039..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go +++ /dev/null @@ -1,22 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs" -) - -// NodeCountBoundary fixes up any transitions between "each modes" in objects -// saved in state, such as switching from NoEach to EachInt. 
-type NodeCountBoundary struct { - Config *configs.Config -} - -func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (EachMode fixup)" -} - -// GraphNodeEvalable -func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{ - Config: n.Config, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go deleted file mode 100644 index 6ba39904..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": -// it is ready to be destroyed. -type NodeDestroyableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var providerSchema *ProviderSchema - // We don't need the provider, but we're calling EvalGetProvider to load the - // schema. - var provider providers.Interface - - // Just destroy it. - var state *states.ResourceInstanceObject - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go deleted file mode 100644 index 7133d42b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ /dev/null @@ -1,228 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" -) - -// NodeRefreshableDataResource represents a resource that is "refreshable". -type NodeRefreshableDataResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) - _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) -) - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - if !countKnown { - // If the count isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. 
- return nil, nil - } - - forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx) - diags = diags.Append(forEachDiags) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - if !forEachKnown { - // If the for_each isn't known yet, we'll skip refreshing and try expansion - // again during the plan walk. - return nil, nil - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // We also need a destroyable resource for orphans that are a result of a - // scaled-in count. - concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and provider since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - - return &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans. As these are orphaned refresh nodes, we add them - // directly as NodeDestroyableDataResource. - &OrphanResourceCountTransformer{ - Concrete: concreteResourceDestroyable, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableDataResource", - } - - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableDataResourceInstance represents a single resource instance -// that is refreshable. -type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResourceInstance -} - -// GraphNodeEvalable -func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // These variables are the state for the eval sequence below, and are - // updated through pointers. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var configVal cty.Value - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Always destroy the existing state first, since we must - // make sure that values from a previous read will not - // get interpolated if we end up needing to defer our - // loading until apply time. 
- &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, // a pointer to nil, here - ProviderSchema: &providerSchema, - }, - - // EvalReadData will _attempt_ to read the data source, but may - // generate an incomplete planned object if the configuration - // includes values that won't be known until apply. - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - OutputChange: &change, - OutputConfigValue: &configVal, - OutputState: &state, - // If the config explicitly has a depends_on for this data - // source, assume the intention is to prevent refreshing ahead - // of that dependency, and therefore we need to deal with this - // resource during the apply phase. We do that by forcing this - // read to result in a plan. - ForcePlanRead: len(n.Config.DependsOn) > 0, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return (*state).Status != states.ObjectPlanned, nil - }, - Then: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalUpdateStateHook{}, - }, - }, - Else: &EvalSequence{ - // We can't deal with this yet, so we'll repeat this step - // during the plan walk to produce a planned change to read - // this during the apply walk. However, we do still need to - // save the generated change and partial state so that - // results from it can be included in other data resources - // or provider configurations during the refresh walk. - // (The planned object we save in the state here will be - // pruned out at the end of the refresh walk, returning - // it back to being unset again for subsequent walks.) - Nodes: []EvalNode{ - &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - }, - }, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go deleted file mode 100644 index 591eb305..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_local.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// NodeLocal represents a named local value in a particular module. -// -// Local value nodes only have one operation, common to all walk types: -// evaluate the result and place it in state. 
-type NodeLocal struct { - Addr addrs.AbsLocalValue - Config *configs.Local -} - -var ( - _ GraphNodeSubPath = (*NodeLocal)(nil) - _ RemovableIfNotTargeted = (*NodeLocal)(nil) - _ GraphNodeReferenceable = (*NodeLocal)(nil) - _ GraphNodeReferencer = (*NodeLocal)(nil) - _ GraphNodeEvalable = (*NodeLocal)(nil) - _ dag.GraphNodeDotter = (*NodeLocal)(nil) -) - -func (n *NodeLocal) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeLocal) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeLocal) RemoveIfNotTargeted() bool { - return true -} - -// GraphNodeReferenceable -func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.LocalValue} -} - -// GraphNodeReferencer -func (n *NodeLocal) References() []*addrs.Reference { - refs, _ := lang.ReferencesInExpr(n.Config.Expr) - return appendResourceDestroyReferences(refs) -} - -// GraphNodeEvalable -func (n *NodeLocal) EvalTree() EvalNode { - return &EvalLocal{ - Addr: n.Addr.LocalValue, - Expr: n.Config.Expr, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go deleted file mode 100644 index 99e44090..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go +++ /dev/null @@ -1,89 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" -) - -// NodeModuleRemoved represents a module that is no longer in the -// config. -type NodeModuleRemoved struct { - Addr addrs.ModuleInstance -} - -var ( - _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) - _ RemovableIfNotTargeted = (*NodeModuleRemoved)(nil) - _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) - _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) - _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) -) - -func (n *NodeModuleRemoved) Name() string { - return fmt.Sprintf("%s (removed)", n.Addr.String()) -} - -// GraphNodeSubPath -func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { - return n.Addr -} - -// GraphNodeEvalable -func (n *NodeModuleRemoved) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalCheckModuleRemoved{ - Addr: n.Addr, - }, - } -} - -func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - // Our "References" implementation indicates that this node depends on - // the call to the module it represents, which implicitly depends on - // everything inside the module. That reference must therefore be - // interpreted in terms of our parent module. - return n.Addr, n.Addr.Parent() -} - -func (n *NodeModuleRemoved) References() []*addrs.Reference { - // We depend on the call to the module we represent, because that - // implicitly then depends on everything inside that module. - // Our ReferenceOutside implementation causes this to be interpreted - // within the parent module. - - _, call := n.Addr.CallInstance() - return []*addrs.Reference{ - { - Subject: call, - - // No source range here, because there's nothing reasonable for - // us to return. 
- }, - } -} - -// RemovableIfNotTargeted -func (n *NodeModuleRemoved) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// EvalCheckModuleRemoved is an EvalNode implementation that verifies that -// a module has been removed from the state as expected. -type EvalCheckModuleRemoved struct { - Addr addrs.ModuleInstance -} - -func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { - mod := ctx.State().Module(n.Addr) - if mod != nil { - // If we get here then that indicates a bug either in the states - // module or in an earlier step of the graph walk, since we should've - // pruned out the module when the last resource was removed from it. - return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) - } - return nil, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go deleted file mode 100644 index 7494e004..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ /dev/null @@ -1,150 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" - "github.com/zclconf/go-cty/cty" -) - -// NodeApplyableModuleVariable represents a module variable input during -// the apply step. -type NodeApplyableModuleVariable struct { - Addr addrs.AbsInputVariableInstance - Config *configs.Variable // Config is the var in the config - Expr hcl.Expression // Expr is the value expression given in the call -} - -// Ensure that we are implementing all of the interfaces we think we are -// implementing. -var ( - _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) - _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) -) - -func (n *NodeApplyableModuleVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { - // We execute in the parent scope (above our own module) because - // expressions in our value are resolved in that context. - return n.Addr.Module.Parent() -} - -// RemovableIfNotTargeted -func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - - // Module input variables have their value expressions defined in the - // context of their calling (parent) module, and so references from - // a node of this type should be resolved in the parent module instance. - referencePath = n.Addr.Module.Parent() - - // Input variables are _referenced_ from their own module, though. 
- selfPath = n.Addr.Module - - return // uses named return values -} - -// GraphNodeReferenceable -func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Variable} -} - -// GraphNodeReferencer -func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { - - // If we have no value expression, we cannot depend on anything. - if n.Expr == nil { - return nil - } - - // Variables in the root don't depend on anything, because their values - // are gathered prior to the graph walk and recorded in the context. - if len(n.Addr.Module) == 0 { - return nil - } - - // Otherwise, we depend on anything referenced by our value expression. - // We ignore diagnostics here under the assumption that we'll re-eval - // all these things later and catch them then; for our purposes here, - // we only care about valid references. - // - // Due to our GraphNodeReferenceOutside implementation, the addresses - // returned by this function are interpreted in the _parent_ module from - // where our associated variable was declared, which is correct because - // our value expression is assigned within a "module" block in the parent - // module. - refs, _ := lang.ReferencesInExpr(n.Expr) - return refs -} - -// GraphNodeEvalable -func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { - // If we have no value, do nothing - if n.Expr == nil { - return &EvalNoop{} - } - - // Otherwise, interpolate the value of this variable and set it - // within the variables mapping. - vals := make(map[string]cty.Value) - - _, call := n.Addr.Module.CallInstance() - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkValidate}, - Node: &EvalModuleCallArgument{ - Addr: n.Addr.Variable, - Config: n.Config, - Expr: n.Expr, - Values: vals, - - IgnoreDiagnostics: false, - }, - }, - - &EvalSetModuleCallArguments{ - Module: call, - Values: vals, - }, - - &evalVariableValidations{ - Addr: n.Addr, - Config: n.Config, - Expr: n.Expr, - - IgnoreDiagnostics: false, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go deleted file mode 100644 index bb3d0653..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ /dev/null @@ -1,200 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// NodeApplyableOutput represents an output that is "applyable": -// it is ready to be applied. 
-type NodeApplyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) - _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) - _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) - _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) -) - -func (n *NodeApplyableOutput) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeTargetDownstream -func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { - // If any of the direct dependencies of an output are targeted then - // the output must always be targeted as well, so its value will always - // be up-to-date at the completion of an apply walk. - return true -} - -func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) { - - // Output values have their expressions resolved in the context of the - // module where they are defined. - referencePath = addr.Module - - // ...but they are referenced in the context of their calling module. - selfPath = addr.Module.Parent() - - return // uses named return values - -} - -// GraphNodeReferenceOutside implementation -func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - return referenceOutsideForOutput(n.Addr) -} - -func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { - // An output in the root module can't be referenced at all. - if addr.Module.IsRoot() { - return nil - } - - // Otherwise, we can be referenced via a reference to our output name - // on the parent module's call, or via a reference to the entire call. - // e.g. module.foo.bar or just module.foo . - // Note that our ReferenceOutside method causes these addresses to be - // relative to the calling module, not the module where the output - // was declared. - _, outp := addr.ModuleCallOutput() - _, call := addr.Module.CallInstance() - return []addrs.Referenceable{outp, call} - -} - -// GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -func referencesForOutput(c *configs.Output) []*addrs.Reference { - impRefs, _ := lang.ReferencesInExpr(c.Expr) - expRefs, _ := lang.References(c.DependsOn) - l := len(impRefs) + len(expRefs) - if l == 0 { - return nil - } - refs := make([]*addrs.Reference, 0, l) - refs = append(refs, impRefs...) - refs = append(refs, expRefs...) 
- return refs - -} - -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []*addrs.Reference { - return appendResourceDestroyReferences(referencesForOutput(n.Config)) -} - -// GraphNodeEvalable -func (n *NodeApplyableOutput) EvalTree() EvalNode { - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, - Node: &EvalWriteOutput{ - Addr: n.Addr.OutputValue, - Sensitive: n.Config.Sensitive, - Expr: n.Config.Expr, - }, - }, - }, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} - -// NodeDestroyableOutput represents an output that is "destroybale": -// its application will remove the output from the state. -type NodeDestroyableOutput struct { - Addr addrs.AbsOutputValue - Config *configs.Output // Config is the output in the config -} - -var ( - _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil) - _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) - _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) - _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil) - _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) - _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) -) - -func (n *NodeDestroyableOutput) Name() string { - return fmt.Sprintf("%s (destroy)", n.Addr.String()) -} - -// GraphNodeSubPath -func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// This will keep the destroy node in the graph if its corresponding output -// node is also in the destroy graph. -func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { - return true -} - -// GraphNodeReferencer -func (n *NodeDestroyableOutput) References() []*addrs.Reference { - return referencesForOutput(n.Config) -} - -// GraphNodeEvalable -func (n *NodeDestroyableOutput) EvalTree() EvalNode { - return &EvalDeleteOutput{ - Addr: n.Addr.OutputValue, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go deleted file mode 100644 index 518b8aa0..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" -) - -// NodeOutputOrphan represents an output that is an orphan. 
-type NodeOutputOrphan struct { - Addr addrs.AbsOutputValue -} - -var ( - _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) - _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) - _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) - _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) -) - -func (n *NodeOutputOrphan) Name() string { - return fmt.Sprintf("%s (orphan)", n.Addr.String()) -} - -// GraphNodeReferenceOutside implementation -func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { - return referenceOutsideForOutput(n.Addr) -} - -// GraphNodeReferenceable -func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { - return referenceableAddrsForOutput(n.Addr) -} - -// GraphNodeSubPath -func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable -func (n *NodeOutputOrphan) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, - Node: &EvalDeleteOutput{ - Addr: n.Addr.OutputValue, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go deleted file mode 100644 index 2071ab16..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go +++ /dev/null @@ -1,11 +0,0 @@ -package terraform - -// NodeApplyableProvider represents a provider during an apply. -type NodeApplyableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n, n.ProviderConfig()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go deleted file mode 100644 index a0cdcfe0..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ /dev/null @@ -1,96 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - - "github.com/hashicorp/terraform/dag" -) - -// ConcreteProviderNodeFunc is a callback type used to convert an -// abstract provider to a concrete one of some type. -type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex - -// NodeAbstractProvider represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeAbstractProvider struct { - Addr addrs.AbsProviderConfig - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. 
- - Config *configs.Provider - Schema *configschema.Block -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) - _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) - _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) - _ GraphNodeProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) - _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) -) - -func (n *NodeAbstractProvider) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// RemovableIfNotTargeted -func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// GraphNodeReferencer -func (n *NodeAbstractProvider) References() []*addrs.Reference { - if n.Config == nil || n.Schema == nil { - return nil - } - - return ReferencesFromConfig(n.Config.Config, n.Schema) -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { - if n.Config == nil { - return nil - } - - return n.Config -} - -// GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { - n.Config = c -} - -// GraphNodeAttachProviderConfigSchema impl. -func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { - n.Schema = schema -} - -// GraphNodeDotter impl. -func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go deleted file mode 100644 index 30d8813a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go +++ /dev/null @@ -1,27 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// NodeDisabledProvider represents a provider that is disabled. A disabled -// provider does nothing. It exists to properly set inheritance information -// for child providers. -type NodeDisabledProvider struct { - *NodeAbstractProvider -} - -var ( - _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) - _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) - _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) - _ GraphNodeProvider = (*NodeDisabledProvider)(nil) - _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) - _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) -) - -func (n *NodeDisabledProvider) Name() string { - return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go deleted file mode 100644 index 1e4daabb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// NodeEvalableProvider represents a provider during an "eval" walk. 
-// This special provider node type just initializes a provider and -// fetches its schema, without configuring it or otherwise interacting -// with it. -type NodeEvalableProvider struct { - *NodeAbstractProvider -} - -// GraphNodeEvalable -func (n *NodeEvalableProvider) EvalTree() EvalNode { - addr := n.Addr - relAddr := addr.ProviderConfig - - return &EvalInitProvider{ - TypeName: relAddr.Type.LegacyString(), - Addr: addr.ProviderConfig, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go deleted file mode 100644 index cf51cf06..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" -) - -// NodeProvisioner represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeProvisioner struct { - NameValue string - PathValue addrs.ModuleInstance -} - -var ( - _ GraphNodeSubPath = (*NodeProvisioner)(nil) - _ GraphNodeProvisioner = (*NodeProvisioner)(nil) - _ GraphNodeEvalable = (*NodeProvisioner)(nil) -) - -func (n *NodeProvisioner) Name() string { - result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 0 { - result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeProvisioner) Path() addrs.ModuleInstance { - return n.PathValue -} - -// GraphNodeProvisioner -func (n *NodeProvisioner) ProvisionerName() string { - return n.NameValue -} - -// GraphNodeEvalable impl. -func (n *NodeProvisioner) EvalTree() EvalNode { - return &EvalInitProvisioner{Name: n.NameValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go deleted file mode 100644 index fd2cfc8e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ /dev/null @@ -1,421 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// ConcreteResourceNodeFunc is a callback type used to convert an -// abstract resource to a concrete one of some type. -type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex - -// GraphNodeResource is implemented by any nodes that represent a resource. -// The type of operation cannot be assumed, only that this node represents -// the given resource. -type GraphNodeResource interface { - ResourceAddr() addrs.AbsResource -} - -// ConcreteResourceInstanceNodeFunc is a callback type used to convert an -// abstract resource instance to a concrete one of some type. -type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex - -// GraphNodeResourceInstance is implemented by any nodes that represent -// a resource instance. A single resource may have multiple instances if, -// for example, the "count" or "for_each" argument is used for it in -// configuration. 
-type GraphNodeResourceInstance interface { - ResourceInstanceAddr() addrs.AbsResourceInstance - - // StateDependencies returns any inter-resource dependencies that are - // stored in the state. - StateDependencies() []addrs.AbsResource -} - -// NodeAbstractResource represents a resource that has no associated -// operations. It registers all the interfaces for a resource that common -// across multiple operation types. -type NodeAbstractResource struct { - Addr addrs.AbsResource // Addr is the address for this resource - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. - - Schema *configschema.Block // Schema for processing the configuration body - SchemaVersion uint64 // Schema version of "Schema", as decided by the provider - Config *configs.Resource // Config is the resource in the config - - ProvisionerSchemas map[string]*configschema.Block - - Targets []addrs.Targetable // Set from GraphNodeTargetable - - // The address of the provider this resource will use - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractResource)(nil) - _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) - _ GraphNodeReferencer = (*NodeAbstractResource)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) - _ GraphNodeResource = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) - _ GraphNodeTargetable = (*NodeAbstractResource)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) -) - -// NewNodeAbstractResource creates an abstract resource graph node for -// the given absolute resource address. -func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource { - return &NodeAbstractResource{ - Addr: addr, - } -} - -// NodeAbstractResourceInstance represents a resource instance with no -// associated operations. It embeds NodeAbstractResource but additionally -// contains an instance key, used to identify one of potentially many -// instances that were created from a resource in configuration, e.g. using -// the "count" or "for_each" arguments. -type NodeAbstractResourceInstance struct { - NodeAbstractResource - InstanceKey addrs.InstanceKey - - // The fields below will be automatically set using the Attach - // interfaces if you're running those transforms, but also be explicitly - // set if you already have that information. 
- ResourceState *states.Resource - Dependencies []addrs.AbsResource -} - -var ( - _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) - _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) - _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) -) - -// NewNodeAbstractResourceInstance creates an abstract resource instance graph -// node for the given absolute resource instance address. -func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { - // Due to the fact that we embed NodeAbstractResource, the given address - // actually ends up split between the resource address in the embedded - // object and the InstanceKey field in our own struct. The - // ResourceInstanceAddr method will stick these back together again on - // request. - return &NodeAbstractResourceInstance{ - NodeAbstractResource: NodeAbstractResource{ - Addr: addr.ContainingResource(), - }, - InstanceKey: addr.Resource.Key, - } -} - -func (n *NodeAbstractResource) Name() string { - return n.ResourceAddr().String() -} - -func (n *NodeAbstractResourceInstance) Name() string { - return n.ResourceInstanceAddr().String() -} - -// GraphNodeSubPath -func (n *NodeAbstractResource) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeReferenceable -func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr.Resource} -} - -// GraphNodeReferenceable -func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - addr := n.ResourceInstanceAddr() - return []addrs.Referenceable{ - addr.Resource, - - // A resource instance can also be referenced by the address of its - // containing resource, so that e.g. a reference to aws_instance.foo - // would match both aws_instance.foo[0] and aws_instance.foo[1]. - addr.ContainingResource().Resource, - } -} - -// GraphNodeReferencer -func (n *NodeAbstractResource) References() []*addrs.Reference { - // If we have a config then we prefer to use that. - if c := n.Config; c != nil { - var result []*addrs.Reference - - for _, traversal := range c.DependsOn { - ref, diags := addrs.ParseRef(traversal) - if diags.HasErrors() { - // We ignore this here, because this isn't a suitable place to return - // errors. This situation should be caught and rejected during - // validation. - log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err()) - continue - } - - result = append(result, ref) - } - - if n.Schema == nil { - // Should never happens, but we'll log if it does so that we can - // see this easily when debugging. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - } - - refs, _ := lang.ReferencesInExpr(c.Count) - result = append(result, refs...) 
- refs, _ = lang.ReferencesInExpr(c.ForEach) - result = append(result, refs...) - refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) - result = append(result, refs...) - if c.Managed != nil { - if c.Managed.Connection != nil { - refs, _ = lang.ReferencesInBlock(c.Managed.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - for _, p := range c.Managed.Provisioners { - if p.When != configs.ProvisionerWhenCreate { - continue - } - if p.Connection != nil { - refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) - result = append(result, refs...) - } - - schema := n.ProvisionerSchemas[p.Type] - if schema == nil { - log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) - } - refs, _ = lang.ReferencesInBlock(p.Config, schema) - result = append(result, refs...) - } - } - return result - } - - // Otherwise, we have no references. - return nil -} - -// GraphNodeReferencer -func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { - // If we have a configuration attached then we'll delegate to our - // embedded abstract resource, which knows how to extract dependencies - // from configuration. If there is no config, then the dependencies will - // be connected during destroy from those stored in the state. - if n.Config != nil { - if n.Schema == nil { - // We'll produce a log message about this out here so that - // we can include the full instance address, since the equivalent - // message in NodeAbstractResource.References cannot see it. - log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) - return nil - } - return n.NodeAbstractResource.References() - } - - // FIXME: remove once the deprecated DependsOn values have been removed from state - // The state dependencies are now connected in a separate transformation as - // absolute addresses, but we need to keep this here until we can be sure - // that no state will need to use the old depends_on references. - if rs := n.ResourceState; rs != nil { - if s := rs.Instance(n.InstanceKey); s != nil { - // State is still storing dependencies as old-style strings, so we'll - // need to do a little work here to massage this to the form we now - // want. - var result []*addrs.Reference - - // It is (apparently) possible for s.Current to be nil. This proved - // difficult to reproduce, so we will fix the symptom here and hope - // to find the root cause another time. - // - // https://github.com/hashicorp/terraform/issues/21407 - if s.Current == nil { - log.Printf("[WARN] no current state found for %s", n.Name()) - return nil - } - for _, addr := range s.Current.DependsOn { - if addr == nil { - // Should never happen; indicates a bug in the state loader - panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) - } - - // This is a little weird: we need to manufacture an addrs.Reference - // with a fake range here because the state isn't something we can - // make source references into. - result = append(result, &addrs.Reference{ - Subject: addr, - SourceRange: tfdiags.SourceRange{ - Filename: "(state file)", - }, - }) - } - return result - } - } - - // If we have neither config nor state then we have no references. 
- return nil -} - -// converts an instance address to the legacy dotted notation -func dottedInstanceAddr(tr addrs.ResourceInstance) string { - // The legacy state format uses dot-separated instance keys, - // rather than bracketed as in our modern syntax. - var suffix string - switch tk := tr.Key.(type) { - case addrs.IntKey: - suffix = fmt.Sprintf(".%d", int(tk)) - case addrs.StringKey: - suffix = fmt.Sprintf(".%s", string(tk)) - } - return tr.Resource.String() + suffix -} - -// StateDependencies returns the dependencies saved in the state. -func (n *NodeAbstractResourceInstance) StateDependencies() []addrs.AbsResource { - if rs := n.ResourceState; rs != nil { - if s := rs.Instance(n.InstanceKey); s != nil { - if s.Current != nil { - return s.Current.Dependencies - } - } - } - - return nil -} - -func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { - n.ResolvedProvider = p -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return relAddr.Absolute(n.Path()), false - } - - // Use our type and containing module path to guess a provider configuration address - return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false -} - -// GraphNodeProviderConsumer -func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // If we have a config we prefer that above all else - if n.Config != nil { - relAddr := n.Config.ProviderConfigAddr() - return relAddr.Absolute(n.Path()), false - } - - // If we have state, then we will use the provider from there - if n.ResourceState != nil { - // An address from the state must match exactly, since we must ensure - // we refresh/destroy a resource with the same provider configuration - // that created it. - return n.ResourceState.ProviderConfig, true - } - - // Use our type and containing module path to guess a provider configuration address - return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) ProvisionedBy() []string { - // If we have no configuration, then we have no provisioners - if n.Config == nil || n.Config.Managed == nil { - return nil - } - - // Build the list of provisioners we need based on the configuration. - // It is okay to have duplicates here. 
- result := make([]string, len(n.Config.Managed.Provisioners)) - for i, p := range n.Config.Managed.Provisioners { - result[i] = p.Type - } - - return result -} - -// GraphNodeProvisionerConsumer -func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { - if n.ProvisionerSchemas == nil { - n.ProvisionerSchemas = make(map[string]*configschema.Block) - } - n.ProvisionerSchemas[name] = schema -} - -// GraphNodeResource -func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource { - return n.Addr -} - -// GraphNodeResourceInstance -func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { - return n.NodeAbstractResource.Addr.Instance(n.InstanceKey) -} - -// GraphNodeAddressable, TODO: remove, used by target, should unify -func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { - return NewLegacyResourceAddress(n.Addr) -} - -// GraphNodeTargetable -func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { - n.Targets = targets -} - -// GraphNodeAttachResourceState -func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { - n.ResourceState = s -} - -// GraphNodeAttachResourceConfig -func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { - n.Config = c -} - -// GraphNodeAttachResourceSchema impl -func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) { - n.Schema = schema - n.SchemaVersion = version -} - -// GraphNodeDotter impl. -func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "box", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go deleted file mode 100644 index 3e2fff3a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go +++ /dev/null @@ -1,71 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" -) - -// NodeApplyableResource represents a resource that is "applyable": -// it may need to have its record in the state adjusted to match configuration. -// -// Unlike in the plan walk, this resource node does not DynamicExpand. Instead, -// it should be inserted into the same graph as any instances of the nodes -// with dependency edges ensuring that the resource is evaluated before any -// of its instances, which will turn ensure that the whole-resource record -// in the state is suitably prepared to receive any updates to instances. 
-type NodeApplyableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeResource = (*NodeApplyableResource)(nil) - _ GraphNodeEvalable = (*NodeApplyableResource)(nil) - _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) - _ GraphNodeReferencer = (*NodeApplyableResource)(nil) -) - -func (n *NodeApplyableResource) Name() string { - return n.NodeAbstractResource.Name() + " (prepare state)" -} - -func (n *NodeApplyableResource) References() []*addrs.Reference { - if n.Config == nil { - log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) - return nil - } - - var result []*addrs.Reference - - // Since this node type only updates resource-level metadata, we only - // need to worry about the parts of the configuration that affect - // our "each mode": the count and for_each meta-arguments. - refs, _ := lang.ReferencesInExpr(n.Config.Count) - result = append(result, refs...) - refs, _ = lang.ReferencesInExpr(n.Config.ForEach) - result = append(result, refs...) - - return result -} - -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - providerAddr := n.ResolvedProvider - - if config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) - return &EvalNoop{} - } - - return &EvalWriteResourceState{ - Addr: addr.Resource, - Config: config, - ProviderAddr: providerAddr, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go deleted file mode 100644 index 217a06b7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go +++ /dev/null @@ -1,431 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// NodeApplyableResourceInstance represents a resource instance that is -// "applyable": it is ready to be applied and is represented by a diff. -// -// This node is for a specific instance of a resource. It will usually be -// accompanied in the graph by a NodeApplyableResource representing its -// containing resource, and should depend on that node to ensure that the -// state is properly prepared to receive changes to instances. 
-type NodeApplyableResourceInstance struct { - *NodeAbstractResourceInstance - - destroyNode GraphNodeDestroyerCBD - graphNodeDeposer // implementation of GraphNodeDeposer -} - -var ( - _ GraphNodeResource = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil) - _ GraphNodeAttachDependencies = (*NodeApplyableResourceInstance)(nil) -) - -// GraphNodeAttachDestroyer -func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) { - n.destroyNode = d -} - -// createBeforeDestroy checks this nodes config status and the status af any -// companion destroy node for CreateBeforeDestroy. -func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool { - cbd := false - - if n.Config != nil && n.Config.Managed != nil { - cbd = n.Config.Managed.CreateBeforeDestroy - } - - if n.destroyNode != nil { - cbd = cbd || n.destroyNode.CreateBeforeDestroy() - } - - return cbd -} - -// GraphNodeCreator -func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeReferencer, overriding NodeAbstractResourceInstance -func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { - // Start with the usual resource instance implementation - ret := n.NodeAbstractResourceInstance.References() - - // Applying a resource must also depend on the destruction of any of its - // dependencies, since this may for example affect the outcome of - // evaluating an entire list of resources with "count" set (by reducing - // the count). - // - // However, we can't do this in create_before_destroy mode because that - // would create a dependency cycle. We make a compromise here of requiring - // changes to be updated across two applies in this case, since the first - // plan will use the old values. - if !n.createBeforeDestroy() { - for _, ref := range ret { - switch tr := ref.Subject.(type) { - case addrs.ResourceInstance: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - case addrs.Resource: - newRef := *ref // shallow copy so we can mutate - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - newRef.Remaining = nil // can't access attributes of something being destroyed - ret = append(ret, &newRef) - } - } - } - - return ret -} - -// GraphNodeAttachDependencies -func (n *NodeApplyableResourceInstance) AttachDependencies(deps []addrs.AbsResource) { - n.Dependencies = deps -} - -// GraphNodeEvalable -func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - if n.Config == nil { - // This should not be possible, but we've got here in at least one - // case as discussed in the following issue: - // https://github.com/hashicorp/terraform/issues/21258 - // To avoid an outright crash here, we'll instead return an explicit - // error. - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource node has no configuration attached", - fmt.Sprintf( - "The graph node for %s has no configuration attached to it. 
This suggests a bug in Terraform's apply graph builder; please report it!", - addr, - ), - )) - err := diags.Err() - return &EvalReturnError{ - Error: &err, - } - } - - // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - - // Stop early if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if change == nil { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // In this particular call to EvalReadData we include our planned - // change, which signals that we expect this read to complete fully - // with no unknown values; it'll produce an error if not. - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Planned: &change, // setting this indicates that the result must be complete - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - OutputState: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - - &EvalUpdateStateHook{}, - }, - } -} - -func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
- var provider providers.Interface - var providerSchema *ProviderSchema - var diff, diffApply *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var err error - var createNew bool - var createBeforeDestroyEnabled bool - var configVal cty.Value - var deposedKey states.DeposedKey - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diffApply, - }, - - // We don't want to do any destroys - // (these are handled by NodeDestroyResourceInstance instead) - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - if diffApply.Action == plans.Delete { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) - } - if destroy && n.createBeforeDestroy() { - createBeforeDestroyEnabled = true - } - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Addr: addr.Resource, - ForceKey: n.PreallocatedDeposedKey, - OutputKey: &deposedKey, - }, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - // Get the saved diff - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &diff, - }, - - // Make a new diff, in case we've learned new values in the state - // during apply which we can now incorporate. - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - PreviousDiff: &diff, - OutputChange: &diffApply, - OutputValue: &configVal, - OutputState: &state, - }, - - // Compare the diffs - &EvalCheckPlannedChange{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Planned: &diff, - Actual: &diffApply, - }, - - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &diffApply, - Destroy: false, - OutChange: &diffApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it only requires destroying, since destroying - // is handled by NodeDestroyResourceInstance. 
- &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil || diffApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - }, - &EvalApply{ - Addr: addr.Resource, - Config: n.Config, - State: &state, - Change: &diffApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, // EvalApplyProvisioners will skip if already tainted - ResourceConfig: n.Config, - CreateNew: &createNew, - Error: &err, - When: configs.ProvisionerWhenCreate, - }, - &EvalMaybeTainted{ - Addr: addr.Resource, - State: &state, - Change: &diffApply, - Error: &err, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalMaybeRestoreDeposedObject{ - Addr: addr.Resource, - PlannedChange: &diffApply, - Key: &deposedKey, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if !diff.Action.IsReplace() { - return true, nil - } - if !n.createBeforeDestroy() { - return true, nil - } - return false, nil - }, - Then: &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: nil, - }, - }, - - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go deleted file mode 100644 index 0374d83d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ /dev/null @@ -1,348 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" -) - -// NodeDestroyResourceInstance represents a resource instance that is to be -// destroyed. -type NodeDestroyResourceInstance struct { - *NodeAbstractResourceInstance - - // If DeposedKey is set to anything other than states.NotDeposed then - // this node destroys a deposed object of the associated instance - // rather than its current object. 
- DeposedKey states.DeposedKey - - CreateBeforeDestroyOverride *bool -} - -var ( - _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) -) - -func (n *NodeDestroyResourceInstance) Name() string { - if n.DeposedKey != states.NotDeposed { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) - } - return n.ResourceInstanceAddr().String() + " (destroy)" -} - -// GraphNodeDestroyer -func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { - if n.CreateBeforeDestroyOverride != nil { - return *n.CreateBeforeDestroyOverride - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { - n.CreateBeforeDestroyOverride = &v - return nil -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { - normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() - destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) - - phaseType := addrs.ResourceInstancePhaseDestroy - if n.CreateBeforeDestroy() { - phaseType = addrs.ResourceInstancePhaseDestroyCBD - } - - for i, normalAddr := range normalAddrs { - switch ta := normalAddr.(type) { - case addrs.Resource: - destroyAddrs[i] = ta.Phase(phaseType) - case addrs.ResourceInstance: - destroyAddrs[i] = ta.Phase(phaseType) - default: - destroyAddrs[i] = normalAddr - } - } - - return destroyAddrs -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { - // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil && c.Managed != nil { - var result []*addrs.Reference - - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - for _, p := range c.Managed.Provisioners { - schema := n.ProvisionerSchemas[p.Type] - - if p.When == configs.ProvisionerWhenDestroy { - if p.Connection != nil { - result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) - } - result = append(result, ReferencesFromConfig(p.Config, schema)...) 
- } - } - - return result - } - - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Get our state - rs := n.ResourceState - var is *states.ResourceInstance - if rs != nil { - is = rs.Instance(n.InstanceKey) - } - if is == nil { - log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) - } - - var changeApply *plans.ResourceInstanceChange - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var err error - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &changeApply, - }, - - &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &changeApply, - Destroy: true, - OutChange: &changeApply, - }, - - // EvalReduceDiff may have simplified our planned change - // into a NoOp if it does not require destroying. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if changeApply == nil || changeApply.Action == plans.NoOp { - return true, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalReadState{ - Addr: addr.Resource, - Output: &state, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalRequireState{ - State: &state, - }, - - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &changeApply, - }, - - // Run destroy provisioners if not tainted - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if state != nil && state.Status == states.ObjectTainted { - return false, nil - } - - return true, nil - }, - - Then: &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, - ResourceConfig: n.Config, - Error: &err, - When: configs.ProvisionerWhenDestroy, - }, - }, - - // If we have a provisioning error, then we just call - // the post-apply hook now. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return err != nil, nil - }, - - Then: &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - }, - - // Make sure we handle data sources properly. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil - }, - - Then: &EvalReadDataApply{ - Addr: addr.Resource, - Config: n.Config, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - }, - Else: &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - } -} - -// NodeDestroyResourceInstance represents a resource that is to be destroyed. -// -// Destroying a resource is a state-only operation: it is the individual -// instances being destroyed that affects remote objects. 
During graph -// construction, NodeDestroyResource should always depend on any other node -// related to the given resource, since it's just a final cleanup to avoid -// leaving skeleton resource objects in state after their instances have -// all been destroyed. -type NodeDestroyResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeResource = (*NodeDestroyResource)(nil) - _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) - _ GraphNodeReferencer = (*NodeDestroyResource)(nil) - _ GraphNodeEvalable = (*NodeDestroyResource)(nil) - - // FIXME: this is here to document that this node is both - // GraphNodeProviderConsumer by virtue of the embedded - // NodeAbstractResource, but that behavior is not desired and we skip it by - // checking for GraphNodeNoProvider. - _ GraphNodeProviderConsumer = (*NodeDestroyResource)(nil) - _ GraphNodeNoProvider = (*NodeDestroyResource)(nil) -) - -func (n *NodeDestroyResource) Name() string { - return n.ResourceAddr().String() + " (clean up state)" -} - -// GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { - // NodeDestroyResource doesn't participate in references: the graph - // builder that created it should ensure directly that it already depends - // on every other node related to its resource, without relying on - // references. - return nil -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResource) References() []*addrs.Reference { - // NodeDestroyResource doesn't participate in references: the graph - // builder that created it should ensure directly that it already depends - // on every other node related to its resource, without relying on - // references. - return nil -} - -// GraphNodeEvalable -func (n *NodeDestroyResource) EvalTree() EvalNode { - // This EvalNode will produce an error if the resource isn't already - // empty by the time it is called, since it should just be pruning the - // leftover husk of a resource in state after all of the child instances - // and their objects were destroyed. - return &EvalForgetResourceState{ - Addr: n.ResourceAddr().Resource, - } -} - -// GraphNodeResource -func (n *NodeDestroyResource) ResourceAddr() addrs.AbsResource { - return n.NodeAbstractResource.ResourceAddr() -} - -// GraphNodeSubpath -func (n *NodeDestroyResource) Path() addrs.ModuleInstance { - return n.NodeAbstractResource.Path() -} - -// GraphNodeNoProvider -// FIXME: this should be removed once the node can be separated from the -// Internal NodeAbstractResource behavior. -func (n *NodeDestroyResource) NoProvider() { -} - -type GraphNodeNoProvider interface { - NoProvider() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go deleted file mode 100644 index e0d5db83..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert -// an abstract resource instance to a concrete one of some type that has -// an associated deposed object key. 
-type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex - -type GraphNodeDeposedResourceInstanceObject interface { - DeposedInstanceObjectKey() states.DeposedKey -} - -// NodePlanDeposedResourceInstanceObject represents deposed resource -// instance objects during plan. These are distinct from the primary object -// for each resource instance since the only valid operation to do with them -// is to destroy them. -// -// This node type is also used during the refresh walk to ensure that the -// record of a deposed object is up-to-date before we plan to destroy it. -type NodePlanDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) -) - -func (n *NodePlanDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) -} - -func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeEvalable impl. -func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // During the refresh walk we will ensure that our record of the deposed - // object is up-to-date. If it was already deleted outside of Terraform - // then this will remove it from state and thus avoid us planning a - // destroy for it during the subsequent plan walk. 
- seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Key: n.DeposedKey, - Output: &state, - }, - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - }, - }) - - // During the plan walk we always produce a planned destroy change, because - // destroying is the only supported action for deposed objects. - var change *plans.ResourceInstanceChange - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan, walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - DeposedKey: n.DeposedKey, - State: &state, - Output: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - DeposedKey: n.DeposedKey, - ProviderSchema: &providerSchema, - Change: &change, - }, - // Since deposed objects cannot be referenced by expressions - // elsewhere, we don't need to also record the planned new - // state in this case. - }, - }, - }) - - return seq -} - -// NodeDestroyDeposedResourceInstanceObject represents deposed resource -// instance objects during apply. Nodes of this type are inserted by -// DiffTransformer when the planned changeset contains "delete" changes for -// deposed instance objects, and its only supported operation is to destroy -// and then forget the associated object. 
-type NodeDestroyDeposedResourceInstanceObject struct { - *NodeAbstractResourceInstance - DeposedKey states.DeposedKey -} - -var ( - _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) - _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) -) - -func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { - return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) -} - -func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { - return n.DeposedKey -} - -// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { - // Deposed objects don't participate in references. - return nil -} - -// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance -func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { - // We don't evaluate configuration for deposed objects, so they effectively - // make no references. - return nil -} - -// GraphNodeDestroyer -func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { - // A deposed instance is always CreateBeforeDestroy by definition, since - // we use deposed only to handle create-before-destroy. - return true -} - -// GraphNodeDestroyerCBD -func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { - if !v { - // Should never happen: deposed instances are _always_ create_before_destroy. - return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") - } - return nil -} - -// GraphNodeEvalable impl. 
-func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - var change *plans.ResourceInstanceChange - var err error - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - // Call pre-apply hook - &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &change, - }, - &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &err, - }, - // Always write the resource back to the state deposed... if it - // was successfully destroyed it will be pruned. If it was not, it will - // be caught on the next run. - &EvalWriteStateDeposed{ - Addr: addr.Resource, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &err, - }, - &EvalReturnError{ - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - } -} - -// GraphNodeDeposer is an optional interface implemented by graph nodes that -// might create a single new deposed object for a specific associated resource -// instance, allowing a caller to optionally pre-allocate a DeposedKey for -// it. -type GraphNodeDeposer interface { - // SetPreallocatedDeposedKey will be called during graph construction - // if a particular node must use a pre-allocated deposed key if/when it - // "deposes" the current object of its associated resource instance. - SetPreallocatedDeposedKey(key states.DeposedKey) -} - -// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. -// Embed it in a node type to get automatic support for it, and then access -// the field PreallocatedDeposedKey to access any pre-allocated key. -type graphNodeDeposer struct { - PreallocatedDeposedKey states.DeposedKey -} - -func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { - n.PreallocatedDeposedKey = key -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go deleted file mode 100644 index ec4aa932..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go +++ /dev/null @@ -1,166 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// NodePlannableResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. -type NodePlannableResource struct { - *NodeAbstractResource - - // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD - // during graph construction, if dependencies require us to force this - // on regardless of what the configuration says. 
- ForceCreateBeforeDestroy *bool -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResource)(nil) - _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) - _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) - _ GraphNodeReferenceable = (*NodePlannableResource)(nil) - _ GraphNodeReferencer = (*NodePlannableResource)(nil) - _ GraphNodeResource = (*NodePlannableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - - if config == nil { - // Nothing to do, then. - log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) - return &EvalNoop{} - } - - // this ensures we can reference the resource even if the count is 0 - return &EvalWriteResourceState{ - Addr: addr.Resource, - Config: config, - ProviderAddr: n.ResolvedProvider, - } -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) CreateBeforeDestroy() bool { - if n.ForceCreateBeforeDestroy != nil { - return *n.ForceCreateBeforeDestroy - } - - // If we have no config, we just assume no - if n.Config == nil || n.Config.Managed == nil { - return false - } - - return n.Config.Managed.CreateBeforeDestroy -} - -// GraphNodeDestroyerCBD -func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { - n.ForceCreateBeforeDestroy = &v - return nil -} - -// GraphNodeDynamicExpandable -func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - - return &NodePlannableResourceInstance{ - NodeAbstractResourceInstance: a, - - // By the time we're walking, we've figured out whether we need - // to force on CreateBeforeDestroy due to dependencies on other - // nodes that have it. 
- ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), - } - } - - // The concrete resource factory we'll use for orphans - concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Schema = n.Schema - a.ProvisionerSchemas = n.ProvisionerSchemas - - return &NodePlannableResourceInstanceOrphan{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count or for_each (if present) - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count/for_each orphans - &OrphanResourceCountTransformer{ - Concrete: concreteResourceOrphan, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodePlannableResource", - } - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go deleted file mode 100644 index decc372a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go +++ /dev/null @@ -1,88 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// NodePlanDestroyableResourceInstance represents a resource that is ready -// to be planned for destruction. -type NodePlanDestroyableResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) - _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. These are written to by address in the EvalNodes we - // declare below. 
- var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - if n.ResolvedProvider.ProviderConfig.Type.String() == "" { - // Should never happen; indicates that the graph was not constructed - // correctly since we didn't get our provider attached. - panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go deleted file mode 100644 index 05ccefc3..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ /dev/null @@ -1,200 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" -) - -// NodePlannableResourceInstance represents a _single_ resource -// instance that is plannable. This means this represents a single -// count index, for example. 
-type NodePlannableResourceInstance struct { - *NodeAbstractResourceInstance - ForceCreateBeforeDestroy bool -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) - _ GraphNodeResource = (*NodePlannableResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) - _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) -) - -// GraphNodeEvalable -func (n *NodePlannableResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - return n.evalTreeManagedResource(addr) - case addrs.DataResourceMode: - return n.evalTreeDataResource(addr) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } -} - -func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var configVal cty.Value - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - // If we already have a non-planned state then we already dealt - // with this during the refresh walk and so we have nothing to do - // here. - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - depChanges := false - - // Check and see if any of our dependencies have changes. - changes := ctx.Changes() - for _, d := range n.References() { - ri, ok := d.Subject.(addrs.ResourceInstance) - if !ok { - continue - } - change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen) - if change != nil && change.Action != plans.NoOp { - depChanges = true - break - } - } - - refreshed := state != nil && state.Status != states.ObjectPlanned - - // If there are no dependency changes, and it's not a forced - // read because we there was no Refresh, then we don't need - // to re-read. If any dependencies have changes, it means - // our config may also have changes and we need to Read the - // data source again. 
- if !depChanges && refreshed { - return false, EvalEarlyExitError{} - } - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalReadData{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready - OutputChange: &change, - OutputValue: &configVal, - OutputState: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} - -func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { - config := n.Config - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - CreateBeforeDestroy: n.ForceCreateBeforeDestroy, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - ProviderSchema: &providerSchema, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go deleted file mode 100644 index 84166949..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go +++ /dev/null @@ -1,84 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
-type NodePlannableResourceInstanceOrphan struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) - _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) -) - -var ( - _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) -) - -func (n *NodePlannableResourceInstanceOrphan) Name() string { - return n.ResourceInstanceAddr().String() + " (orphan)" -} - -// GraphNodeEvalable -func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - var provider providers.Interface - var providerSchema *ProviderSchema - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - &EvalDiffDestroy{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - Output: &change, - OutputState: &state, // Will point to a nil state after this complete, signalling destroyed - }, - &EvalCheckPreventDestroy{ - Addr: addr.Resource, - Config: n.Config, - Change: &change, - }, - &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - }, - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go deleted file mode 100644 index 2dd549c0..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ /dev/null @@ -1,314 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -// NodeRefreshableManagedResource represents a resource that is expanabled into -// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. -type NodeRefreshableManagedResource struct { - *NodeAbstractResource - - // We attach dependencies to the Resource during refresh, since the - // instances are instantiated during DynamicExpand. 
- Dependencies []addrs.AbsResource -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) - _ GraphNodeAttachDependencies = (*NodeRefreshableManagedResource)(nil) -) - -// GraphNodeAttachDependencies -func (n *NodeRefreshableManagedResource) AttachDependencies(deps []addrs.AbsResource) { - n.Dependencies = deps -} - -// GraphNodeDynamicExpandable -func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) - diags = diags.Append(countDiags) - if countDiags.HasErrors() { - return nil, diags.Err() - } - - forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) - if forEachDiags.HasErrors() { - return nil, diags.Err() - } - - // Next we need to potentially rename an instance address in the state - // if we're transitioning whether "count" is set at all. - fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) - - // Our graph transformers require access to the full state, so we'll - // temporarily lock it while we work on this. - state := ctx.State().Lock() - defer ctx.State().Unlock() - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - a.ResolvedProvider = n.ResolvedProvider - a.Dependencies = n.Dependencies - - return &NodeRefreshableManagedResourceInstance{ - NodeAbstractResourceInstance: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Schema: n.Schema, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - }, - - // Add the count orphans to make sure these resources are accounted for - // during a scale in. - &OrphanResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - ForEach: forEachMap, - Addr: n.ResourceAddr(), - State: state, - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{Targets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeRefreshableManagedResource", - } - - graph, diags := b.Build(ctx.Path()) - return graph, diags.ErrWithWarnings() -} - -// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. 
-type NodeRefreshableManagedResourceInstance struct { - *NodeAbstractResourceInstance -} - -var ( - _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) - _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) -) - -// GraphNodeDestroyer -func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { - addr := n.ResourceInstanceAddr() - return &addr -} - -// GraphNodeEvalable -func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { - addr := n.ResourceInstanceAddr() - - // Eval info is different depending on what kind of resource this is - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - if n.ResourceState == nil { - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) - return n.evalTreeManagedResourceNoState() - } - log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) - return n.evalTreeManagedResource() - - case addrs.DataResourceMode: - // Get the data source node. If we don't have a configuration - // then it is an orphan so we destroy it (remove it from the state). - var dn GraphNodeEvalable - if n.Config != nil { - dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } else { - dn = &NodeDestroyableDataResourceInstance{ - NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, - } - } - - return dn.EvalTree() - default: - panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) - } -} - -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var state *states.ResourceInstanceObject - - // This happened during initial development. All known cases were - // fixed and tested but as a sanity check let's assert here. - if n.ResourceState == nil { - err := fmt.Errorf( - "No resource state attached for addr: %s\n\n"+ - "This is a bug. Please report this to Terraform with your configuration\n"+ - "and state attached. 
Please be careful to scrub any sensitive information.", - addr) - return &EvalReturnError{Error: &err} - } - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalRefreshDependencies{ - State: &state, - Dependencies: &n.Dependencies, - }, - - &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - }, - } -} - -// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource -// nodes that don't have state attached. An example of where this functionality -// is useful is when a resource that already exists in state is being scaled -// out, ie: has its resource count increased. In this case, the scaled out node -// needs to be available to other nodes (namely data sources) that may depend -// on it for proper interpolation, or confusing "index out of range" errors can -// occur. -// -// The steps in this sequence are very similar to the steps carried out in -// plan, but nothing is done with the diff after it is created - it is dropped, -// and its changes are not counted in the UI. -func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { - addr := n.ResourceInstanceAddr() - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider providers.Interface - var providerSchema *ProviderSchema - var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - - &EvalReadState{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - }, - - &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - OutputChange: &change, - OutputState: &state, - Stub: true, - }, - - &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - }, - - // We must also save the planned change, so that expressions in - // other nodes, such as provider configurations and data resources, - // can work with the planned new value. - // - // This depends on the fact that Context.Refresh creates a - // temporary new empty changeset for the duration of its graph - // walk, and so this recorded change will be discarded immediately - // after the refresh walk completes. 
- &EvalWriteDiff{ - Addr: addr.Resource, - Change: &change, - ProviderSchema: &providerSchema, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go deleted file mode 100644 index efa657bf..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go +++ /dev/null @@ -1,90 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/zclconf/go-cty/cty" -) - -// NodeValidatableResource represents a resource that is used for validation -// only. -type NodeValidatableResource struct { - *NodeAbstractResource -} - -var ( - _ GraphNodeSubPath = (*NodeValidatableResource)(nil) - _ GraphNodeEvalable = (*NodeValidatableResource)(nil) - _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) - _ GraphNodeReferencer = (*NodeValidatableResource)(nil) - _ GraphNodeResource = (*NodeValidatableResource)(nil) - _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) -) - -// GraphNodeEvalable -func (n *NodeValidatableResource) EvalTree() EvalNode { - addr := n.ResourceAddr() - config := n.Config - - // Declare the variables will be used are used to pass values along - // the evaluation sequence below. These are written to via pointers - // passed to the EvalNodes. - var provider providers.Interface - var providerSchema *ProviderSchema - var configVal cty.Value - - seq := &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalValidateResource{ - Addr: addr.Resource, - Provider: &provider, - ProviderSchema: &providerSchema, - Config: config, - ConfigVal: &configVal, - }, - }, - } - - if managed := n.Config.Managed; managed != nil { - hasCount := n.Config.Count != nil - hasForEach := n.Config.ForEach != nil - - // Validate all the provisioners - for _, p := range managed.Provisioners { - var provisioner provisioners.Interface - var provisionerSchema *configschema.Block - - if p.Connection == nil { - p.Connection = config.Managed.Connection - } else if config.Managed.Connection != nil { - p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) - } - - seq.Nodes = append( - seq.Nodes, - &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - Schema: &provisionerSchema, - }, - &EvalValidateProvisioner{ - ResourceAddr: addr.Resource, - Provisioner: &provisioner, - Schema: &provisionerSchema, - Config: p, - ResourceHasCount: hasCount, - ResourceHasForEach: hasForEach, - }, - ) - } - } - - return seq -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go deleted file mode 100644 index e3aee6fc..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go +++ /dev/null @@ -1,64 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// NodeRootVariable represents a root variable input. 
-type NodeRootVariable struct { - Addr addrs.InputVariable - Config *configs.Variable -} - -var ( - _ GraphNodeSubPath = (*NodeRootVariable)(nil) - _ GraphNodeReferenceable = (*NodeRootVariable)(nil) - _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) -) - -func (n *NodeRootVariable) Name() string { - return n.Addr.String() -} - -// GraphNodeSubPath -func (n *NodeRootVariable) Path() addrs.ModuleInstance { - return addrs.RootModuleInstance -} - -// GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { - return []addrs.Referenceable{n.Addr} -} - -// GraphNodeEvalable -func (n *NodeRootVariable) EvalTree() EvalNode { - // We don't actually need to _evaluate_ a root module variable, because - // its value is always constant and already stashed away in our EvalContext. - // However, we might need to run some user-defined validation rules against - // the value. - - if n.Config == nil || len(n.Config.Validations) == 0 { - return &EvalSequence{} // nothing to do - } - - return &evalVariableValidations{ - Addr: addrs.RootModuleInstance.InputVariable(n.Addr.Name), - Config: n.Config, - Expr: nil, // not set for root module variables - - IgnoreDiagnostics: false, - } -} - -// dag.GraphNodeDotter impl. -func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "note", - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go deleted file mode 100644 index af04c6cd..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ /dev/null @@ -1,122 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/gob" - "fmt" - "io" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs" -) - -func init() { - gob.Register(make([]interface{}, 0)) - gob.Register(make([]map[string]interface{}, 0)) - gob.Register(make(map[string]interface{})) - gob.Register(make(map[string]string)) -} - -// Plan represents a single Terraform execution plan, which contains -// all the information necessary to make an infrastructure change. -// -// A plan has to contain basically the entire state of the world -// necessary to make a change: the state, diff, config, backend config, etc. -// This is so that it can run alone without any other data. -type Plan struct { - // Diff describes the resource actions that must be taken when this - // plan is applied. - Diff *Diff - - // Config represents the entire configuration that was present when this - // plan was created. - Config *configs.Config - - // State is the Terraform state that was current when this plan was - // created. - // - // It is not allowed to apply a plan that has a stale state, since its - // diff could be outdated. - State *State - - // Vars retains the variables that were set when creating the plan, so - // that the same variables can be applied during apply. - Vars map[string]cty.Value - - // Targets, if non-empty, contains a set of resource address strings that - // identify graph nodes that were selected as targets for plan. - // - // When targets are set, any graph node that is not directly targeted or - // indirectly targeted via dependencies is excluded from the graph. - Targets []string - - // TerraformVersion is the version of Terraform that was used to create - // this plan. 
- // - // It is not allowed to apply a plan created with a different version of - // Terraform, since the other fields of this structure may be interpreted - // in different ways between versions. - TerraformVersion string - - // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries - // used as plugins for each provider during plan. - // - // These must match between plan and apply to ensure that the diff is - // correctly interpreted, since different provider versions may have - // different attributes or attribute value constraints. - ProviderSHA256s map[string][]byte - - // Backend is the backend that this plan should use and store data with. - Backend *BackendState - - // Destroy indicates that this plan was created for a full destroy operation - Destroy bool - - once sync.Once -} - -func (p *Plan) String() string { - buf := new(bytes.Buffer) - buf.WriteString("DIFF:\n\n") - buf.WriteString(p.Diff.String()) - buf.WriteString("\n\nSTATE:\n\n") - buf.WriteString(p.State.String()) - return buf.String() -} - -func (p *Plan) init() { - p.once.Do(func() { - if p.Diff == nil { - p.Diff = new(Diff) - p.Diff.init() - } - - if p.State == nil { - p.State = new(State) - p.State.init() - } - - if p.Vars == nil { - p.Vars = make(map[string]cty.Value) - } - }) -} - -// The format byte is prefixed into the plan file format so that we have -// the ability in the future to change the file format if we want for any -// reason. -const planFormatMagic = "tfplan" -const planFormatVersion byte = 2 - -// ReadPlan reads a plan structure out of a reader in the format that -// was written by WritePlan. -func ReadPlan(src io.Reader) (*Plan, error) { - return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") -} - -// WritePlan writes a plan somewhere in a binary format. -func WritePlan(d *Plan, dst io.Writer) error { - return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go deleted file mode 100644 index d82dc0f4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go +++ /dev/null @@ -1,521 +0,0 @@ -package terraform - -import ( - "encoding/json" - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/tfdiags" -) - -var _ providers.Interface = (*MockProvider)(nil) - -// MockProvider implements providers.Interface but mocks out all the -// calls for testing purposes. -type MockProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - GetSchemaCalled bool - GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests - - PrepareProviderConfigCalled bool - PrepareProviderConfigResponse providers.PrepareProviderConfigResponse - PrepareProviderConfigRequest providers.PrepareProviderConfigRequest - PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse - - ValidateResourceTypeConfigCalled bool - ValidateResourceTypeConfigTypeName string - ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse - ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest - ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse - - ValidateDataSourceConfigCalled bool - ValidateDataSourceConfigTypeName string - ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse - ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest - ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse - - UpgradeResourceStateCalled bool - UpgradeResourceStateTypeName string - UpgradeResourceStateResponse providers.UpgradeResourceStateResponse - UpgradeResourceStateRequest providers.UpgradeResourceStateRequest - UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse - - ConfigureCalled bool - ConfigureResponse providers.ConfigureResponse - ConfigureRequest providers.ConfigureRequest - ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below - - StopCalled bool - StopFn func() error - StopResponse error - - ReadResourceCalled bool - ReadResourceResponse providers.ReadResourceResponse - ReadResourceRequest providers.ReadResourceRequest - ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse - - PlanResourceChangeCalled bool - PlanResourceChangeResponse providers.PlanResourceChangeResponse - PlanResourceChangeRequest providers.PlanResourceChangeRequest - PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse - - ApplyResourceChangeCalled bool - ApplyResourceChangeResponse providers.ApplyResourceChangeResponse - ApplyResourceChangeRequest providers.ApplyResourceChangeRequest - ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse - - ImportResourceStateCalled bool - ImportResourceStateResponse providers.ImportResourceStateResponse - ImportResourceStateRequest providers.ImportResourceStateRequest - ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - // Legacy return type for existing tests, which will be shimmed into an - // ImportResourceStateResponse if set - ImportStateReturn []*InstanceState - - ReadDataSourceCalled bool - ReadDataSourceResponse providers.ReadDataSourceResponse - ReadDataSourceRequest providers.ReadDataSourceRequest - ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse - - CloseCalled bool - CloseError error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written 
against - // the old mock API. - ValidateFn func(c *ResourceConfig) (ws []string, es []error) - ConfigureFn func(c *ResourceConfig) error - DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) - ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) -} - -func (p *MockProvider) GetSchema() providers.GetSchemaResponse { - p.Lock() - defer p.Unlock() - p.GetSchemaCalled = true - return p.getSchema() -} - -func (p *MockProvider) getSchema() providers.GetSchemaResponse { - // This version of getSchema doesn't do any locking, so it's suitable to - // call from other methods of this mock as long as they are already - // holding the lock. - - ret := providers.GetSchemaResponse{ - Provider: providers.Schema{}, - DataSources: map[string]providers.Schema{}, - ResourceTypes: map[string]providers.Schema{}, - } - if p.GetSchemaReturn != nil { - ret.Provider.Block = p.GetSchemaReturn.Provider - for n, s := range p.GetSchemaReturn.DataSources { - ret.DataSources[n] = providers.Schema{ - Block: s, - } - } - for n, s := range p.GetSchemaReturn.ResourceTypes { - ret.ResourceTypes[n] = providers.Schema{ - Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), - Block: s, - } - } - } - - return ret -} - -func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { - p.Lock() - defer p.Unlock() - - p.PrepareProviderConfigCalled = true - p.PrepareProviderConfigRequest = r - if p.PrepareProviderConfigFn != nil { - return p.PrepareProviderConfigFn(r) - } - return p.PrepareProviderConfigResponse -} - -func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateResourceTypeConfigCalled = true - p.ValidateResourceTypeConfigRequest = r - - if p.ValidateFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - warns, errs := p.ValidateFn(rc) - ret := providers.ValidateResourceTypeConfigResponse{} - for _, warn := range warns { - ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) - } - for _, err := range errs { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - } - if p.ValidateResourceTypeConfigFn != nil { - return p.ValidateResourceTypeConfigFn(r) - } - - return p.ValidateResourceTypeConfigResponse -} - -func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceConfigCalled = true - p.ValidateDataSourceConfigRequest = r - - if p.ValidateDataSourceConfigFn != nil { - return p.ValidateDataSourceConfigFn(r) - } - - return p.ValidateDataSourceConfigResponse -} - -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { - p.Lock() - defer p.Unlock() - - schemas := p.getSchema() - schema := schemas.ResourceTypes[r.TypeName] - schemaType := schema.Block.ImpliedType() - - p.UpgradeResourceStateCalled = true - p.UpgradeResourceStateRequest = r - - if p.UpgradeResourceStateFn != nil { - return p.UpgradeResourceStateFn(r) - } - - resp := p.UpgradeResourceStateResponse - - if resp.UpgradedState == cty.NilVal { - switch { - case r.RawStateFlatmap != nil: - v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) - if err != nil { - resp.Diagnostics = 
resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - case len(r.RawStateJSON) > 0: - v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - } - } - return resp -} - -func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureRequest = r - - if p.ConfigureFn != nil { - resp := p.getSchema() - schema := resp.Provider.Block - rc := NewResourceConfigShimmed(r.Config, schema) - ret := providers.ConfigureResponse{} - - err := p.ConfigureFn(rc) - if err != nil { - ret.Diagnostics = ret.Diagnostics.Append(err) - } - return ret - } - if p.ConfigureNewFn != nil { - return p.ConfigureNewFn(r) - } - - return p.ConfigureResponse -} - -func (p *MockProvider) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provider itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadResourceCalled = true - p.ReadResourceRequest = r - - if p.ReadResourceFn != nil { - return p.ReadResourceFn(r) - } - - // make sure the NewState fits the schema - newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState) - if err != nil { - panic(err) - } - resp := p.ReadResourceResponse - resp.NewState = newState - - return resp -} - -func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { - p.Lock() - defer p.Unlock() - - p.PlanResourceChangeCalled = true - p.PlanResourceChangeRequest = r - - if p.DiffFn != nil { - ps := p.getSchema() - if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil { - return providers.PlanResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Printf("mock provider has no schema for resource type %s", r.TypeName)), - } - } - schema := ps.ResourceTypes[r.TypeName].Block - info := &InstanceInfo{ - Type: r.TypeName, - } - priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0) - cfg := NewResourceConfigShimmed(r.Config, schema) - - legacyDiff, err := p.DiffFn(info, priorState, cfg) - - var res providers.PlanResourceChangeResponse - res.PlannedState = r.ProposedNewState - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - if legacyDiff != nil { - newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - - res.PlannedState = newVal - - var requiresNew []string - for attr, d := range legacyDiff.Attributes { - if d.RequiresNew { - requiresNew = append(requiresNew, attr) - } - } - requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType()) - if err != nil { - res.Diagnostics = res.Diagnostics.Append(err) - } - res.RequiresReplace = requiresReplace - } - return res - } - if p.PlanResourceChangeFn != nil { - return p.PlanResourceChangeFn(r) - } - - return p.PlanResourceChangeResponse -} - -func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - 
p.Lock() - p.ApplyResourceChangeCalled = true - p.ApplyResourceChangeRequest = r - p.Unlock() - - if p.ApplyFn != nil { - // ApplyFn is a special callback fashioned after our old provider - // interface, which expected to be given an actual diff rather than - // separate old/new values to apply. Therefore we need to approximate - // a diff here well enough that _most_ of our legacy ApplyFns in old - // tests still see the behavior they are expecting. New tests should - // not use this, and should instead use ApplyResourceChangeFn directly. - providerSchema := p.getSchema() - schema, ok := providerSchema.ResourceTypes[r.TypeName] - if !ok { - return providers.ApplyResourceChangeResponse{ - Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)), - } - } - - info := &InstanceInfo{ - Type: r.TypeName, - } - - priorVal := r.PriorState - plannedVal := r.PlannedState - priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal) - plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal) - s := NewInstanceStateShimmedFromValue(priorVal, 0) - d := &InstanceDiff{ - Attributes: make(map[string]*ResourceAttrDiff), - } - if plannedMap == nil { // destroying, then - d.Destroy = true - // Destroy diffs don't have any attribute diffs - } else { - if priorMap == nil { // creating, then - // We'll just make an empty prior map to make things easier below. - priorMap = make(map[string]string) - } - - for k, new := range plannedMap { - old := priorMap[k] - newComputed := false - if new == hcl2shim.UnknownVariableValue { - new = "" - newComputed = true - } - d.Attributes[k] = &ResourceAttrDiff{ - Old: old, - New: new, - NewComputed: newComputed, - Type: DiffAttrInput, // not generally used in tests, so just hard-coded - } - } - // Also need any attributes that were removed in "planned" - for k, old := range priorMap { - if _, ok := plannedMap[k]; ok { - continue - } - d.Attributes[k] = &ResourceAttrDiff{ - Old: old, - NewRemoved: true, - Type: DiffAttrInput, - } - } - } - newState, err := p.ApplyFn(info, s, d) - resp := providers.ApplyResourceChangeResponse{} - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - if newState != nil { - var newVal cty.Value - if newState != nil { - var err error - newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType()) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - } else { - // If apply returned a nil new state then that's the old way to - // indicate that the object was destroyed. Our new interface calls - // for that to be signalled as a null value. - newVal = cty.NullVal(schema.Block.ImpliedType()) - } - resp.NewState = newVal - } - - return resp - } - if p.ApplyResourceChangeFn != nil { - return p.ApplyResourceChangeFn(r) - } - - return p.ApplyResourceChangeResponse -} - -func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { - p.Lock() - defer p.Unlock() - - if p.ImportStateReturn != nil { - for _, is := range p.ImportStateReturn { - if is.Attributes == nil { - is.Attributes = make(map[string]string) - } - is.Attributes["id"] = is.ID - - typeName := is.Ephemeral.Type - // Use the requested type if the resource has no type of it's own. - // We still return the empty type, which will error, but this prevents a panic. 
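The ApplyFn branch above rebuilds a legacy-style attribute diff by flattening the prior and planned values and comparing keys: unknown planned values become "computed" and prior keys missing from the plan are marked "removed". A minimal standalone sketch of that comparison, using a placeholder unknown-value sentinel and a stripped-down struct standing in for ResourceAttrDiff:

```go
package main

import "fmt"

// unknownValue is a placeholder for hcl2shim.UnknownVariableValue.
const unknownValue = "<unknown>"

// attrDiff carries just the ResourceAttrDiff fields the shim populates.
type attrDiff struct {
	Old         string
	New         string
	NewComputed bool
	NewRemoved  bool
}

// approximateDiff mirrors the shim's flatmap comparison: every planned key is
// diffed against its prior value, and prior keys absent from the plan are
// marked as removed.
func approximateDiff(prior, planned map[string]string) map[string]*attrDiff {
	diff := map[string]*attrDiff{}
	for k, newVal := range planned {
		d := &attrDiff{Old: prior[k], New: newVal}
		if newVal == unknownValue {
			d.New = ""
			d.NewComputed = true
		}
		diff[k] = d
	}
	for k, oldVal := range prior {
		if _, ok := planned[k]; !ok {
			diff[k] = &attrDiff{Old: oldVal, NewRemoved: true}
		}
	}
	return diff
}

func main() {
	prior := map[string]string{"name": "web", "tags.Team": "sre"}
	planned := map[string]string{"name": "web", "ami": unknownValue}
	for k, d := range approximateDiff(prior, planned) {
		fmt.Printf("%s: %+v\n", k, d)
	}
}
```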
- if typeName == "" { - typeName = r.TypeName - } - - schema := p.GetSchemaReturn.ResourceTypes[typeName] - if schema == nil { - panic("no schema found for " + typeName) - } - - private, err := json.Marshal(is.Meta) - if err != nil { - panic(err) - } - - state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) - if err != nil { - panic(err) - } - - state, err = schema.CoerceValue(state) - if err != nil { - panic(err) - } - - p.ImportResourceStateResponse.ImportedResources = append( - p.ImportResourceStateResponse.ImportedResources, - providers.ImportedResource{ - TypeName: is.Ephemeral.Type, - State: state, - Private: private, - }) - } - } - - p.ImportResourceStateCalled = true - p.ImportResourceStateRequest = r - if p.ImportResourceStateFn != nil { - return p.ImportResourceStateFn(r) - } - - return p.ImportResourceStateResponse -} - -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { - p.Lock() - defer p.Unlock() - - p.ReadDataSourceCalled = true - p.ReadDataSourceRequest = r - - if p.ReadDataSourceFn != nil { - return p.ReadDataSourceFn(r) - } - - return p.ReadDataSourceResponse -} - -func (p *MockProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go deleted file mode 100644 index d476e4ea..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go +++ /dev/null @@ -1,153 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/provisioners" -) - -var _ provisioners.Interface = (*MockProvisioner)(nil) - -// MockProvisioner implements provisioners.Interface but mocks out all the -// calls for testing purposes. -type MockProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetSchemaCalled bool - GetSchemaResponse provisioners.GetSchemaResponse - - ValidateProvisionerConfigCalled bool - ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest - ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse - ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse - - ProvisionResourceCalled bool - ProvisionResourceRequest provisioners.ProvisionResourceRequest - ProvisionResourceResponse provisioners.ProvisionResourceResponse - ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse - - StopCalled bool - StopResponse error - StopFn func() error - - CloseCalled bool - CloseResponse error - CloseFn func() error - - // Legacy callbacks: if these are set, we will shim incoming calls for - // new-style methods to these old-fashioned terraform.ResourceProvider - // mock callbacks, for the benefit of older tests that were written against - // the old mock API. - ApplyFn func(rs *InstanceState, c *ResourceConfig) error -} - -func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - return p.getSchema() -} - -// getSchema is the implementation of GetSchema, which can be called from other -// methods on MockProvisioner that may already be holding the lock. 
-func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { - return p.GetSchemaResponse -} - -func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { - p.Lock() - defer p.Unlock() - - p.ValidateProvisionerConfigCalled = true - p.ValidateProvisionerConfigRequest = r - if p.ValidateProvisionerConfigFn != nil { - return p.ValidateProvisionerConfigFn(r) - } - return p.ValidateProvisionerConfigResponse -} - -func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { - p.Lock() - defer p.Unlock() - - p.ProvisionResourceCalled = true - p.ProvisionResourceRequest = r - if p.ApplyFn != nil { - if !r.Config.IsKnown() { - panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config)) - } - - schema := p.getSchema() - rc := NewResourceConfigShimmed(r.Config, schema.Provisioner) - connVal := r.Connection - connMap := map[string]string{} - - if !connVal.IsNull() && connVal.IsKnown() { - for it := connVal.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - - if !av.IsKnown() || av.IsNull() { - continue - } - - av, _ = convert.Convert(av, cty.String) - connMap[name] = av.AsString() - } - } - - // We no longer pass the full instance state to a provisioner, so we'll - // construct a partial one that should be good enough for what existing - // test mocks need. - is := &InstanceState{ - Ephemeral: EphemeralState{ - ConnInfo: connMap, - }, - } - var resp provisioners.ProvisionResourceResponse - err := p.ApplyFn(is, rc) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - } - return resp - } - if p.ProvisionResourceFn != nil { - fn := p.ProvisionResourceFn - return fn(r) - } - - return p.ProvisionResourceResponse -} - -func (p *MockProvisioner) Stop() error { - // We intentionally don't lock in this one because the whole point of this - // method is to be called concurrently with another operation that can - // be cancelled. The provisioner itself is responsible for handling - // any concurrency concerns in this case. - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopResponse -} - -func (p *MockProvisioner) Close() error { - p.Lock() - defer p.Unlock() - - p.CloseCalled = true - if p.CloseFn != nil { - return p.CloseFn() - } - - return p.CloseResponse -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go deleted file mode 100644 index fcf28aa6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ /dev/null @@ -1,551 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/copystructure" - "github.com/mitchellh/reflectwalk" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// Resource is a legacy way to identify a particular resource instance. -// -// New code should use addrs.ResourceInstance instead. This is still here -// only for codepaths that haven't been updated yet. -type Resource struct { - // These are all used by the new EvalNode stuff. 
- Name string - Type string - CountIndex int - - // These aren't really used anymore anywhere, but we keep them around - // since we haven't done a proper cleanup yet. - Id string - Info *InstanceInfo - Config *ResourceConfig - Dependencies []string - Diff *InstanceDiff - Provider ResourceProvider - State *InstanceState - Flags ResourceFlag -} - -// NewResource constructs a legacy Resource object from an -// addrs.ResourceInstance value. -// -// This is provided to shim to old codepaths that haven't been updated away -// from this type yet. Since this old type is not able to represent instances -// that have string keys, this function will panic if given a resource address -// that has a string key. -func NewResource(addr addrs.ResourceInstance) *Resource { - ret := &Resource{ - Name: addr.Resource.Name, - Type: addr.Resource.Type, - } - - if addr.Key != addrs.NoKey { - switch tk := addr.Key.(type) { - case addrs.IntKey: - ret.CountIndex = int(tk) - default: - panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) - } - } - - return ret -} - -// ResourceKind specifies what kind of instance we're working with, whether -// its a primary instance, a tainted instance, or an orphan. -type ResourceFlag byte - -// InstanceInfo is used to hold information about the instance and/or -// resource being modified. -type InstanceInfo struct { - // Id is a unique name to represent this instance. This is not related - // to InstanceState.ID in any way. - Id string - - // ModulePath is the complete path of the module containing this - // instance. - ModulePath []string - - // Type is the resource type of this instance - Type string - - // uniqueExtra is an internal field that can be populated to supply - // extra metadata that is used to identify a unique instance in - // the graph walk. This will be appended to HumanID when uniqueId - // is called. - uniqueExtra string -} - -// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. -// -// InstanceInfo is a legacy type, and uses of it should be gradually replaced -// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as -// appropriate. -// -// The legacy InstanceInfo type cannot represent module instances with instance -// keys, so this function will panic if given such a path. Uses of this type -// should all be removed or replaced before implementing "count" and "for_each" -// arguments on modules in order to avoid such panics. -// -// This legacy type also cannot represent resource instances with string -// instance keys. It will panic if the given key is not either NoKey or an -// IntKey. -func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { - // We need an old-style []string module path for InstanceInfo. - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - panic("NewInstanceInfo cannot convert module instance with key") - } - path[i] = step.Name - } - - // This is a funny old meaning of "id" that is no longer current. It should - // not be used for anything users might see. Note that it does not include - // a representation of the resource mode, and so it's impossible to - // determine from an InstanceInfo alone whether it is a managed or data - // resource that is being referred to. - id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) - if addr.Resource.Resource.Mode == addrs.DataResourceMode { - id = "data." 
+ id - } - if addr.Resource.Key != addrs.NoKey { - switch k := addr.Resource.Key.(type) { - case addrs.IntKey: - id = id + fmt.Sprintf(".%d", int(k)) - default: - panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) - } - } - - return &InstanceInfo{ - Id: id, - ModulePath: path, - Type: addr.Resource.Resource.Type, - } -} - -// ResourceAddress returns the address of the resource that the receiver is describing. -func (i *InstanceInfo) ResourceAddress() *ResourceAddress { - // GROSS: for tainted and deposed instances, their status gets appended - // to i.Id to create a unique id for the graph node. Historically these - // ids were displayed to the user, so it's designed to be human-readable: - // "aws_instance.bar.0 (deposed #0)" - // - // So here we detect such suffixes and try to interpret them back to - // their original meaning so we can then produce a ResourceAddress - // with a suitable InstanceType. - id := i.Id - instanceType := TypeInvalid - if idx := strings.Index(id, " ("); idx != -1 { - remain := id[idx:] - id = id[:idx] - - switch { - case strings.Contains(remain, "tainted"): - instanceType = TypeTainted - case strings.Contains(remain, "deposed"): - instanceType = TypeDeposed - } - } - - addr, err := parseResourceAddressInternal(id) - if err != nil { - // should never happen, since that would indicate a bug in the - // code that constructed this InstanceInfo. - panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) - } - if len(i.ModulePath) > 1 { - addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied - } - if instanceType != TypeInvalid { - addr.InstanceTypeSet = true - addr.InstanceType = instanceType - } - return addr -} - -// ResourceConfig is a legacy type that was formerly used to represent -// interpolatable configuration blocks. It is now only used to shim to old -// APIs that still use this type, via NewResourceConfigShimmed. -type ResourceConfig struct { - ComputedKeys []string - Raw map[string]interface{} - Config map[string]interface{} - - raw *config.RawConfig -} - -// NewResourceConfig creates a new ResourceConfig from a config.RawConfig. -func NewResourceConfig(c *config.RawConfig) *ResourceConfig { - result := &ResourceConfig{raw: c} - result.interpolateForce() - return result -} - -// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly -// the given value. -// -// The given value may contain hcl2shim.UnknownVariableValue to signal that -// something is computed, but it must not contain unprocessed interpolation -// sequences as we might've seen in Terraform v0.11 and prior. -func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { - v := hcl2shim.HCL2ValueFromConfigValue(raw) - - // This is a little weird but we round-trip the value through the hcl2shim - // package here for two reasons: firstly, because that reduces the risk - // of it including something unlike what NewResourceConfigShimmed would - // produce, and secondly because it creates a copy of "raw" just in case - // something is relying on the fact that in the old world the raw and - // config maps were always distinct, and thus you could in principle mutate - // one without affecting the other. (I sure hope nobody was doing that, though!) 
- cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) - - return &ResourceConfig{ - Raw: raw, - Config: cfg, - - ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), - } -} - -// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy -// ResourceConfig object, so that it can be passed to older APIs that expect -// this wrapping. -// -// The returned ResourceConfig is already interpolated and cannot be -// re-interpolated. It is, therefore, useful only to functions that expect -// an already-populated ResourceConfig which they then treat as read-only. -// -// If the given value is not of an object type that conforms to the given -// schema then this function will panic. -func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { - if !val.Type().IsObjectType() { - panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) - } - ret := &ResourceConfig{} - - legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) - if legacyVal != nil { - ret.Config = legacyVal - - // Now we need to walk through our structure and find any unknown values, - // producing the separate list ComputedKeys to represent these. We use the - // schema here so that we can preserve the expected invariant - // that an attribute is always either wholly known or wholly unknown, while - // a child block can be partially unknown. - ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") - } else { - ret.Config = make(map[string]interface{}) - } - ret.Raw = ret.Config - - return ret -} - -// Record the any config values in ComputedKeys. This field had been unused in -// helper/schema, but in the new protocol we're using this so that the SDK can -// now handle having an unknown collection. The legacy diff code doesn't -// properly handle the unknown, because it can't be expressed in the same way -// between the config and diff. -func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { - var ret []string - ty := val.Type() - - if val.IsNull() { - return ret - } - - if !val.IsKnown() { - // we shouldn't have an entirely unknown resource, but prevent empty - // strings just in case - if len(path) > 0 { - ret = append(ret, path) - } - return ret - } - - if path != "" { - path += "." - } - switch { - case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): - i := 0 - for it := val.ElementIterator(); it.Next(); i++ { - _, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) - ret = append(ret, keys...) - } - - case ty.IsMapType(), ty.IsObjectType(): - for it := val.ElementIterator(); it.Next(); { - subK, subVal := it.Element() - keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) - ret = append(ret, keys...) - } - } - - return ret -} - -// DeepCopy performs a deep copy of the configuration. This makes it safe -// to modify any of the structures that are part of the resource config without -// affecting the original configuration. -func (c *ResourceConfig) DeepCopy() *ResourceConfig { - // DeepCopying a nil should return a nil to avoid panics - if c == nil { - return nil - } - - // Copy, this will copy all the exported attributes - copy, err := copystructure.Config{Lock: true}.Copy(c) - if err != nil { - panic(err) - } - - // Force the type - result := copy.(*ResourceConfig) - - return result -} - -// Equal checks the equality of two resource configs. 
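newResourceConfigShimmedComputedKeys above records the dotted path of every unknown value so the SDK can distinguish a wholly unknown attribute from a partially unknown block. A rough sketch of the same walk over plain interface{} trees, with a placeholder sentinel in place of hcl2shim.UnknownVariableValue and hypothetical helper names:

```go
package main

import "fmt"

// unknownValue is a stand-in for hcl2shim.UnknownVariableValue.
const unknownValue = "<unknown>"

// computedKeys returns the dotted paths of every unknown leaf, in the spirit
// of newResourceConfigShimmedComputedKeys (which walks cty.Value instead).
func computedKeys(val interface{}, path string) []string {
	var keys []string
	switch v := val.(type) {
	case string:
		if v == unknownValue && path != "" {
			keys = append(keys, path)
		}
	case []interface{}:
		for i, elem := range v {
			keys = append(keys, computedKeys(elem, join(path, fmt.Sprintf("%d", i)))...)
		}
	case map[string]interface{}:
		for k, elem := range v {
			keys = append(keys, computedKeys(elem, join(path, k))...)
		}
	}
	return keys
}

func join(path, part string) string {
	if path == "" {
		return part
	}
	return path + "." + part
}

func main() {
	cfg := map[string]interface{}{
		"ami":  unknownValue,
		"tags": map[string]interface{}{"Name": "web", "Id": unknownValue},
	}
	fmt.Println(computedKeys(cfg, "")) // e.g. [ami tags.Id] (map order varies)
}
```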
-func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { - // If either are nil, then they're only equal if they're both nil - if c == nil || c2 == nil { - return c == c2 - } - - // Sort the computed keys so they're deterministic - sort.Strings(c.ComputedKeys) - sort.Strings(c2.ComputedKeys) - - // Two resource configs if their exported properties are equal. - // We don't compare "raw" because it is never used again after - // initialization and for all intents and purposes they are equal - // if the exported properties are equal. - check := [][2]interface{}{ - {c.ComputedKeys, c2.ComputedKeys}, - {c.Raw, c2.Raw}, - {c.Config, c2.Config}, - } - for _, pair := range check { - if !reflect.DeepEqual(pair[0], pair[1]) { - return false - } - } - - return true -} - -// CheckSet checks that the given list of configuration keys is -// properly set. If not, errors are returned for each unset key. -// -// This is useful to be called in the Validate method of a ResourceProvider. -func (c *ResourceConfig) CheckSet(keys []string) []error { - var errs []error - - for _, k := range keys { - if !c.IsSet(k) { - errs = append(errs, fmt.Errorf("%s must be set", k)) - } - } - - return errs -} - -// Get looks up a configuration value by key and returns the value. -// -// The second return value is true if the get was successful. Get will -// return the raw value if the key is computed, so you should pair this -// with IsComputed. -func (c *ResourceConfig) Get(k string) (interface{}, bool) { - // We aim to get a value from the configuration. If it is computed, - // then we return the pure raw value. - source := c.Config - if c.IsComputed(k) { - source = c.Raw - } - - return c.get(k, source) -} - -// GetRaw looks up a configuration value by key and returns the value, -// from the raw, uninterpolated config. -// -// The second return value is true if the get was successful. Get will -// not succeed if the value is being computed. -func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { - return c.get(k, c.Raw) -} - -// IsComputed returns whether the given key is computed or not. -func (c *ResourceConfig) IsComputed(k string) bool { - // The next thing we do is check the config if we get a computed - // value out of it. - v, ok := c.get(k, c.Config) - if !ok { - return false - } - - // If value is nil, then it isn't computed - if v == nil { - return false - } - - // Test if the value contains an unknown value - var w unknownCheckWalker - if err := reflectwalk.Walk(v, &w); err != nil { - panic(err) - } - - return w.Unknown -} - -// IsSet checks if the key in the configuration is set. A key is set if -// it has a value or the value is being computed (is unknown currently). -// -// This function should be used rather than checking the keys of the -// raw configuration itself, since a key may be omitted from the raw -// configuration if it is being computed. 
-func (c *ResourceConfig) IsSet(k string) bool { - if c == nil { - return false - } - - if c.IsComputed(k) { - return true - } - - if _, ok := c.Get(k); ok { - return true - } - - return false -} - -func (c *ResourceConfig) get( - k string, raw map[string]interface{}) (interface{}, bool) { - parts := strings.Split(k, ".") - if len(parts) == 1 && parts[0] == "" { - parts = nil - } - - var current interface{} = raw - var previous interface{} = nil - for i, part := range parts { - if current == nil { - return nil, false - } - - cv := reflect.ValueOf(current) - switch cv.Kind() { - case reflect.Map: - previous = current - v := cv.MapIndex(reflect.ValueOf(part)) - if !v.IsValid() { - if i > 0 && i != (len(parts)-1) { - tryKey := strings.Join(parts[i:], ".") - v := cv.MapIndex(reflect.ValueOf(tryKey)) - if !v.IsValid() { - return nil, false - } - - return v.Interface(), true - } - - return nil, false - } - - current = v.Interface() - case reflect.Slice: - previous = current - - if part == "#" { - // If any value in a list is computed, this whole thing - // is computed and we can't read any part of it. - for i := 0; i < cv.Len(); i++ { - if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { - return v, true - } - } - - current = cv.Len() - } else { - i, err := strconv.ParseInt(part, 0, 0) - if err != nil { - return nil, false - } - if int(i) < 0 || int(i) >= cv.Len() { - return nil, false - } - current = cv.Index(int(i)).Interface() - } - case reflect.String: - // This happens when map keys contain "." and have a common - // prefix so were split as path components above. - actualKey := strings.Join(parts[i-1:], ".") - if prevMap, ok := previous.(map[string]interface{}); ok { - v, ok := prevMap[actualKey] - return v, ok - } - - return nil, false - default: - panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) - } - } - - return current, true -} - -// interpolateForce is a temporary thing. We want to get rid of interpolate -// above and likewise this, but it can only be done after the f-ast-graph -// refactor is complete. -func (c *ResourceConfig) interpolateForce() { - if c.raw == nil { - // If we don't have a lowercase "raw" but we _do_ have the uppercase - // Raw populated then this indicates that we're recieving a shim - // ResourceConfig created by NewResourceConfigShimmed, which is already - // fully evaluated and thus this function doesn't need to do anything. - if c.Raw != nil { - return - } - - var err error - c.raw, err = config.NewRawConfig(make(map[string]interface{})) - if err != nil { - panic(err) - } - } - - c.ComputedKeys = c.raw.UnknownKeys() - c.Raw = c.raw.RawMap() - c.Config = c.raw.Config() -} - -// unknownCheckWalker -type unknownCheckWalker struct { - Unknown bool -} - -func (w *unknownCheckWalker) Primitive(v reflect.Value) error { - if v.Interface() == hcl2shim.UnknownVariableValue { - w.Unknown = true - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go deleted file mode 100644 index ca833fe1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go +++ /dev/null @@ -1,618 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// ResourceAddress is a way of identifying an individual resource (or, -// eventually, a subset of resources) within the state. It is used for Targets. 
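ResourceConfig.get, shown above, resolves a dotted key such as "tags.Name" or "ports.0" against the nested config maps, with "#" returning a list's length. A simplified standalone sketch of that traversal over plain maps and slices (the computed-value and merged-map-key edge cases handled above are omitted):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lookup walks a dotted key through nested maps and slices, roughly mirroring
// the legacy ResourceConfig.get traversal.
func lookup(raw map[string]interface{}, key string) (interface{}, bool) {
	var current interface{} = raw
	for _, part := range strings.Split(key, ".") {
		switch cv := current.(type) {
		case map[string]interface{}:
			v, ok := cv[part]
			if !ok {
				return nil, false
			}
			current = v
		case []interface{}:
			if part == "#" {
				return len(cv), true // "#" asks for the element count
			}
			i, err := strconv.Atoi(part)
			if err != nil || i < 0 || i >= len(cv) {
				return nil, false
			}
			current = cv[i]
		default:
			return nil, false
		}
	}
	return current, true
}

func main() {
	cfg := map[string]interface{}{
		"tags":  map[string]interface{}{"Name": "web"},
		"ports": []interface{}{80, 443},
	}
	fmt.Println(lookup(cfg, "tags.Name")) // web true
	fmt.Println(lookup(cfg, "ports.1"))   // 443 true
	fmt.Println(lookup(cfg, "ports.#"))   // 2 true
}
```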
-type ResourceAddress struct { - // Addresses a resource falling somewhere in the module path - // When specified alone, addresses all resources within a module path - Path []string - - // Addresses a specific resource that occurs in a list - Index int - - InstanceType InstanceType - InstanceTypeSet bool - Name string - Type string - Mode ResourceMode // significant only if InstanceTypeSet -} - -// Copy returns a copy of this ResourceAddress -func (r *ResourceAddress) Copy() *ResourceAddress { - if r == nil { - return nil - } - - n := &ResourceAddress{ - Path: make([]string, 0, len(r.Path)), - Index: r.Index, - InstanceType: r.InstanceType, - Name: r.Name, - Type: r.Type, - Mode: r.Mode, - } - - n.Path = append(n.Path, r.Path...) - - return n -} - -// String outputs the address that parses into this address. -func (r *ResourceAddress) String() string { - var result []string - for _, p := range r.Path { - result = append(result, "module", p) - } - - switch r.Mode { - case ManagedResourceMode: - // nothing to do - case DataResourceMode: - result = append(result, "data") - default: - panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) - } - - if r.Type != "" { - result = append(result, r.Type) - } - - if r.Name != "" { - name := r.Name - if r.InstanceTypeSet { - switch r.InstanceType { - case TypePrimary: - name += ".primary" - case TypeDeposed: - name += ".deposed" - case TypeTainted: - name += ".tainted" - } - } - - if r.Index >= 0 { - name += fmt.Sprintf("[%d]", r.Index) - } - result = append(result, name) - } - - return strings.Join(result, ".") -} - -// HasResourceSpec returns true if the address has a resource spec, as -// defined in the documentation: -// https://www.terraform.io/docs/internals/resource-addressing.html -// In particular, this returns false if the address contains only -// a module path, thus addressing the entire module. -func (r *ResourceAddress) HasResourceSpec() bool { - return r.Type != "" && r.Name != "" -} - -// WholeModuleAddress returns the resource address that refers to all -// resources in the same module as the receiver address. -func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { - return &ResourceAddress{ - Path: r.Path, - Index: -1, - InstanceTypeSet: false, - } -} - -// MatchesResourceConfig returns true if the receiver matches the given -// configuration resource within the given _static_ module path. Note that -// the module path in a resource address is a _dynamic_ module path, and -// multiple dynamic resource paths may map to a single static path if -// count and for_each are in use on module calls. -// -// Since resource configuration blocks represent all of the instances of -// a multi-instance resource, the index of the address (if any) is not -// considered. -func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { - if r.HasResourceSpec() { - // FIXME: Some ugliness while we are between worlds. Functionality - // in "addrs" should eventually replace this ResourceAddress idea - // completely, but for now we'll need to translate to the old - // way of representing resource modes. 
- switch r.Mode { - case ManagedResourceMode: - if rc.Mode != addrs.ManagedResourceMode { - return false - } - case DataResourceMode: - if rc.Mode != addrs.DataResourceMode { - return false - } - } - if r.Type != rc.Type || r.Name != rc.Name { - return false - } - } - - addrPath := r.Path - - // normalize - if len(addrPath) == 0 { - addrPath = nil - } - if len(path) == 0 { - path = nil - } - rawPath := []string(path) - return reflect.DeepEqual(addrPath, rawPath) -} - -// stateId returns the ID that this resource should be entered with -// in the state. This is also used for diffs. In the future, we'd like to -// move away from this string field so I don't export this. -func (r *ResourceAddress) stateId() string { - result := fmt.Sprintf("%s.%s", r.Type, r.Name) - switch r.Mode { - case ManagedResourceMode: - // Done - case DataResourceMode: - result = fmt.Sprintf("data.%s", result) - default: - panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) - } - if r.Index >= 0 { - result += fmt.Sprintf(".%d", r.Index) - } - - return result -} - -// parseResourceAddressInternal parses the somewhat bespoke resource -// identifier used in states and diffs, such as "instance.name.0". -func parseResourceAddressInternal(s string) (*ResourceAddress, error) { - // Split based on ".". Every resource address should have at least two - // elements (type and name). - parts := strings.Split(s, ".") - if len(parts) < 2 || len(parts) > 4 { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Data resource if we have at least 3 parts and the first one is data - mode := ManagedResourceMode - if len(parts) > 2 && parts[0] == "data" { - mode = DataResourceMode - parts = parts[1:] - } - - // If we're not a data resource and we have more than 3, then it is an error - if len(parts) > 3 && mode != DataResourceMode { - return nil, fmt.Errorf("Invalid internal resource address format: %s", s) - } - - // Build the parts of the resource address that are guaranteed to exist - addr := &ResourceAddress{ - Type: parts[0], - Name: parts[1], - Index: -1, - InstanceType: TypePrimary, - Mode: mode, - } - - // If we have more parts, then we have an index. Parse that. - if len(parts) > 2 { - idx, err := strconv.ParseInt(parts[2], 0, 0) - if err != nil { - return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) - } - - addr.Index = int(idx) - } - - return addr, nil -} - -func ParseResourceAddress(s string) (*ResourceAddress, error) { - matches, err := tokenizeResourceAddress(s) - if err != nil { - return nil, err - } - mode := ManagedResourceMode - if matches["data_prefix"] != "" { - mode = DataResourceMode - } - resourceIndex, err := ParseResourceIndex(matches["index"]) - if err != nil { - return nil, err - } - instanceType, err := ParseInstanceType(matches["instance_type"]) - if err != nil { - return nil, err - } - path := ParseResourcePath(matches["path"]) - - // not allowed to say "data." without a type following - if mode == DataResourceMode && matches["type"] == "" { - return nil, fmt.Errorf( - "invalid resource address %q: must target specific data instance", - s, - ) - } - - return &ResourceAddress{ - Path: path, - Index: resourceIndex, - InstanceType: instanceType, - InstanceTypeSet: matches["instance_type"] != "", - Name: matches["name"], - Type: matches["type"], - Mode: mode, - }, nil -} - -// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a -// resource name as described in a module diff. 
-// -// For historical reasons a different addressing format is used in this -// context. The internal format should not be shown in the UI and instead -// this function should be used to translate to a ResourceAddress and -// then, where appropriate, use the String method to produce a canonical -// resource address string for display in the UI. -// -// The given path slice must be empty (or nil) for the root module, and -// otherwise consist of a sequence of module names traversing down into -// the module tree. If a non-nil path is provided, the caller must not -// modify its underlying array after passing it to this function. -func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) { - addr, err := parseResourceAddressInternal(key) - if err != nil { - return nil, err - } - addr.Path = path - return addr, nil -} - -// NewLegacyResourceAddress creates a ResourceAddress from a new-style -// addrs.AbsResource value. -// -// This is provided for shimming purposes so that we can still easily call into -// older functions that expect the ResourceAddress type. -func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress { - ret := &ResourceAddress{ - Type: addr.Resource.Type, - Name: addr.Resource.Name, - } - - switch addr.Resource.Mode { - case addrs.ManagedResourceMode: - ret.Mode = ManagedResourceMode - case addrs.DataResourceMode: - ret.Mode = DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode)) - } - - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - // At the time of writing this can't happen because we don't - // ket generate keyed module instances. This legacy codepath must - // be removed before we can support "count" and "for_each" for - // modules. - panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) - } - - path[i] = step.Name - } - ret.Path = path - ret.Index = -1 - - return ret -} - -// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style -// addrs.AbsResource value. -// -// This is provided for shimming purposes so that we can still easily call into -// older functions that expect the ResourceAddress type. -func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress { - ret := &ResourceAddress{ - Type: addr.Resource.Resource.Type, - Name: addr.Resource.Resource.Name, - } - - switch addr.Resource.Resource.Mode { - case addrs.ManagedResourceMode: - ret.Mode = ManagedResourceMode - case addrs.DataResourceMode: - ret.Mode = DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode)) - } - - path := make([]string, len(addr.Module)) - for i, step := range addr.Module { - if step.InstanceKey != addrs.NoKey { - // At the time of writing this can't happen because we don't - // ket generate keyed module instances. This legacy codepath must - // be removed before we can support "count" and "for_each" for - // modules. 
- panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) - } - - path[i] = step.Name - } - ret.Path = path - - if addr.Resource.Key == addrs.NoKey { - ret.Index = -1 - } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { - ret.Index = int(ik) - } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { - ret.Index = -1 - } else { - panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) - } - - return ret -} - -// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to -// the new resource address type addrs.AbsResourceInstance. -// -// This method can be used only on an address that has a resource specification. -// It will panic if called on a module-path-only ResourceAddress. Use -// method HasResourceSpec to check before calling, in contexts where it is -// unclear. -// -// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" -// states, and so if these are present on the receiver then they are discarded. -// -// This is provided for shimming purposes so that we can easily adapt functions -// that are returning the legacy ResourceAddress type, for situations where -// the new type is required. -func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { - if !addr.HasResourceSpec() { - panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") - } - - ret := addrs.AbsResourceInstance{ - Module: addr.ModuleInstanceAddr(), - Resource: addrs.ResourceInstance{ - Resource: addrs.Resource{ - Type: addr.Type, - Name: addr.Name, - }, - }, - } - - switch addr.Mode { - case ManagedResourceMode: - ret.Resource.Resource.Mode = addrs.ManagedResourceMode - case DataResourceMode: - ret.Resource.Resource.Mode = addrs.DataResourceMode - default: - panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) - } - - if addr.Index != -1 { - ret.Resource.Key = addrs.IntKey(addr.Index) - } - - return ret -} - -// ModuleInstanceAddr returns the module path portion of the receiver as a -// addrs.ModuleInstance value. -func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { - path := make(addrs.ModuleInstance, len(addr.Path)) - for i, name := range addr.Path { - path[i] = addrs.ModuleInstanceStep{Name: name} - } - return path -} - -// Contains returns true if and only if the given node is contained within -// the receiver. -// -// Containment is defined in terms of the module and resource heirarchy: -// a resource is contained within its module and any ancestor modules, -// an indexed resource instance is contained with the unindexed resource, etc. -func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { - ourPath := addr.Path - givenPath := other.Path - if len(givenPath) < len(ourPath) { - return false - } - for i := range ourPath { - if ourPath[i] != givenPath[i] { - return false - } - } - - // If the receiver is a whole-module address then the path prefix - // matching is all we need. - if !addr.HasResourceSpec() { - return true - } - - if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { - return false - } - - if addr.Index != -1 && addr.Index != other.Index { - return false - } - - if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { - return false - } - - return true -} - -// Equals returns true if the receiver matches the given address. 
-// -// The name of this method is a misnomer, since it doesn't test for exact -// equality. Instead, it tests that the _specified_ parts of each -// address match, treating any unspecified parts as wildcards. -// -// See also Contains, which takes a more heirarchical approach to comparing -// addresses. -func (addr *ResourceAddress) Equals(raw interface{}) bool { - other, ok := raw.(*ResourceAddress) - if !ok { - return false - } - - pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || - reflect.DeepEqual(addr.Path, other.Path) - - indexMatch := addr.Index == -1 || - other.Index == -1 || - addr.Index == other.Index - - nameMatch := addr.Name == "" || - other.Name == "" || - addr.Name == other.Name - - typeMatch := addr.Type == "" || - other.Type == "" || - addr.Type == other.Type - - // mode is significant only when type is set - modeMatch := addr.Type == "" || - other.Type == "" || - addr.Mode == other.Mode - - return pathMatch && - indexMatch && - addr.InstanceType == other.InstanceType && - nameMatch && - typeMatch && - modeMatch -} - -// Less returns true if and only if the receiver should be sorted before -// the given address when presenting a list of resource addresses to -// an end-user. -// -// This sort uses lexicographic sorting for most components, but uses -// numeric sort for indices, thus causing index 10 to sort after -// index 9, rather than after index 1. -func (addr *ResourceAddress) Less(other *ResourceAddress) bool { - - switch { - - case len(addr.Path) != len(other.Path): - return len(addr.Path) < len(other.Path) - - case !reflect.DeepEqual(addr.Path, other.Path): - // If the two paths are the same length but don't match, we'll just - // cheat and compare the string forms since it's easier than - // comparing all of the path segments in turn, and lexicographic - // comparison is correct for the module path portion. - addrStr := addr.String() - otherStr := other.String() - return addrStr < otherStr - - case addr.Mode != other.Mode: - return addr.Mode == DataResourceMode - - case addr.Type != other.Type: - return addr.Type < other.Type - - case addr.Name != other.Name: - return addr.Name < other.Name - - case addr.Index != other.Index: - // Since "Index" is -1 for an un-indexed address, this also conveniently - // sorts unindexed addresses before indexed ones, should they both - // appear for some reason. - return addr.Index < other.Index - - case addr.InstanceTypeSet != other.InstanceTypeSet: - return !addr.InstanceTypeSet - - case addr.InstanceType != other.InstanceType: - // InstanceType is actually an enum, so this is just an arbitrary - // sort based on the enum numeric values, and thus not particularly - // meaningful. 
- return addr.InstanceType < other.InstanceType - - default: - return false - - } -} - -func ParseResourceIndex(s string) (int, error) { - if s == "" { - return -1, nil - } - return strconv.Atoi(s) -} - -func ParseResourcePath(s string) []string { - if s == "" { - return nil - } - parts := strings.Split(s, ".") - path := make([]string, 0, len(parts)) - for _, s := range parts { - // Due to the limitations of the regexp match below, the path match has - // some noise in it we have to filter out :| - if s == "" || s == "module" { - continue - } - path = append(path, s) - } - return path -} - -func ParseInstanceType(s string) (InstanceType, error) { - switch s { - case "", "primary": - return TypePrimary, nil - case "deposed": - return TypeDeposed, nil - case "tainted": - return TypeTainted, nil - default: - return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) - } -} - -func tokenizeResourceAddress(s string) (map[string]string, error) { - // Example of portions of the regexp below using the - // string "aws_instance.web.tainted[1]" - re := regexp.MustCompile(`\A` + - // "module.foo.module.bar" (optional) - `(?P(?:module\.(?P[^.]+)\.?)*)` + - // possibly "data.", if targeting is a data resource - `(?P(?:data\.)?)` + - // "aws_instance.web" (optional when module path specified) - `(?:(?P[^.]+)\.(?P[^.[]+))?` + - // "tainted" (optional, omission implies: "primary") - `(?:\.(?P\w+))?` + - // "1" (optional, omission implies: "0") - `(?:\[(?P\d+)\])?` + - `\z`) - - groupNames := re.SubexpNames() - rawMatches := re.FindAllStringSubmatch(s, -1) - if len(rawMatches) != 1 { - return nil, fmt.Errorf("invalid resource address %q", s) - } - - matches := make(map[string]string) - for i, m := range rawMatches[0] { - matches[groupNames[i]] = m - } - - return matches, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go deleted file mode 100644 index 27f98c0f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go +++ /dev/null @@ -1,322 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/plugin/discovery" - "github.com/hashicorp/terraform/providers" -) - -// ResourceProvider is an interface that must be implemented by any -// resource provider: the thing that creates and manages the resources in -// a Terraform configuration. -// -// Important implementation note: All returned pointers, such as -// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to -// shared data. Terraform is highly parallel and assumes that this data is safe -// to read/write in parallel so it must be unique references. Note that it is -// safe to return arguments as results, however. -type ResourceProvider interface { - /********************************************************************* - * Functions related to the provider - *********************************************************************/ - - // ProviderSchema returns the config schema for the main provider - // configuration, as would appear in a "provider" block in the - // configuration files. - // - // Currently not all providers support schema. Callers must therefore - // first call Resources and DataSources and ensure that at least one - // resource or data source has the SchemaAvailable flag set. 
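Resource address strings such as "module.foo.data.aws_ami.web[1]" are split up by the single anchored regexp in tokenizeResourceAddress, removed just above. A self-contained sketch of that tokenization; the capture-group names are reconstructed from the matches["..."] lookups in ParseResourceAddress rather than copied verbatim, so treat them as assumptions:

```go
package main

import (
	"fmt"
	"regexp"
)

// addrRe re-assembles the tokenizeResourceAddress pattern. The group names
// (path, module_name, data_prefix, type, name, instance_type, index) are
// inferred from how ParseResourceAddress reads the resulting match map.
var addrRe = regexp.MustCompile(`\A` +
	`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` + // "module.foo."
	`(?P<data_prefix>(?:data\.)?)` + // optional "data."
	`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` + // "aws_ami.web"
	`(?:\.(?P<instance_type>\w+))?` + // "tainted" / "deposed"
	`(?:\[(?P<index>\d+)\])?` + // "[1]"
	`\z`)

// tokenize returns the named submatches for a resource address string.
func tokenize(s string) (map[string]string, error) {
	m := addrRe.FindStringSubmatch(s)
	if m == nil {
		return nil, fmt.Errorf("invalid resource address %q", s)
	}
	out := map[string]string{}
	for i, name := range addrRe.SubexpNames() {
		if name != "" {
			out[name] = m[i]
		}
	}
	return out, nil
}

func main() {
	toks, _ := tokenize("module.foo.data.aws_ami.web[1]")
	// toks["path"] == "module.foo.", toks["data_prefix"] == "data.",
	// toks["type"] == "aws_ami", toks["name"] == "web", toks["index"] == "1"
	fmt.Println(toks)
}
```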
- GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) - - // Input was used prior to v0.12 to ask the provider to prompt the user - // for input to complete the configuration. - // - // From v0.12 onwards this method is never called because Terraform Core - // is able to handle the necessary input logic itself based on the - // schema returned from GetSchema. - Input(UIInput, *ResourceConfig) (*ResourceConfig, error) - - // Validate is called once at the beginning with the raw configuration - // (no interpolation done) and can return a list of warnings and/or - // errors. - // - // This is called once with the provider configuration only. It may not - // be called at all if no provider configuration is given. - // - // This should not assume that any values of the configurations are valid. - // The primary use case of this call is to check that required keys are - // set. - Validate(*ResourceConfig) ([]string, []error) - - // Configure configures the provider itself with the configuration - // given. This is useful for setting things like access keys. - // - // This won't be called at all if no provider configuration is given. - // - // Configure returns an error if it occurred. - Configure(*ResourceConfig) error - - // Resources returns all the available resource types that this provider - // knows how to manage. - Resources() []ResourceType - - // Stop is called when the provider should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - /********************************************************************* - * Functions related to individual resources - *********************************************************************/ - - // ValidateResource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateResource(string, *ResourceConfig) ([]string, []error) - - // Apply applies a diff to a specific resource and returns the new - // resource state along with an error. - // - // If the resource state given has an empty ID, then a new resource - // is expected to be created. - Apply( - *InstanceInfo, - *InstanceState, - *InstanceDiff) (*InstanceState, error) - - // Diff diffs a resource versus a desired state and returns - // a diff. 
- Diff( - *InstanceInfo, - *InstanceState, - *ResourceConfig) (*InstanceDiff, error) - - // Refresh refreshes a resource and updates all of its attributes - // with the latest information. - Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) - - /********************************************************************* - * Functions related to importing - *********************************************************************/ - - // ImportState requests that the given resource be imported. - // - // The returned InstanceState only requires ID be set. Importing - // will always call Refresh after the state to complete it. - // - // IMPORTANT: InstanceState doesn't have the resource type attached - // to it. A type must be specified on the state via the Ephemeral - // field on the state. - // - // This function can return multiple states. Normally, an import - // will map 1:1 to a physical resource. However, some resources map - // to multiple. For example, an AWS security group may contain many rules. - // Each rule is represented by a separate resource in Terraform, - // therefore multiple states are returned. - ImportState(*InstanceInfo, string) ([]*InstanceState, error) - - /********************************************************************* - * Functions related to data resources - *********************************************************************/ - - // ValidateDataSource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per data source instance. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateDataSource(string, *ResourceConfig) ([]string, []error) - - // DataSources returns all of the available data sources that this - // provider implements. - DataSources() []DataSource - - // ReadDataDiff produces a diff that represents the state that will - // be produced when the given data source is read using a later call - // to ReadDataApply. - ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - - // ReadDataApply initializes a data instance using the configuration - // in a diff produced by ReadDataDiff. - ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) -} - -// ResourceProviderCloser is an interface that providers that can close -// connections that aren't needed anymore must implement. -type ResourceProviderCloser interface { - Close() error -} - -// ResourceType is a type of resource that a resource provider can manage. -type ResourceType struct { - Name string // Name of the resource, example "instance" (no provider prefix) - Importable bool // Whether this resource supports importing - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// DataSource is a data source that a resource provider implements. -type DataSource struct { - Name string - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. 
Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// ResourceProviderResolver is an interface implemented by objects that are -// able to resolve a given set of resource provider version constraints -// into ResourceProviderFactory callbacks. -type ResourceProviderResolver interface { - // Given a constraint map, return a ResourceProviderFactory for each - // requested provider. If some or all of the constraints cannot be - // satisfied, return a non-nil slice of errors describing the problems. - ResolveProviders(reqd discovery.PluginRequirements) (map[addrs.Provider]ResourceProviderFactory, []error) -} - -// ResourceProviderResolverFunc wraps a callback function and turns it into -// a ResourceProviderResolver implementation, for convenience in situations -// where a function and its associated closure are sufficient as a resolver -// implementation. -type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[addrs.Provider]ResourceProviderFactory, []error) - -// ResolveProviders implements ResourceProviderResolver by calling the -// wrapped function. -func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[addrs.Provider]ResourceProviderFactory, []error) { - return f(reqd) -} - -// ResourceProviderResolverFixed returns a ResourceProviderResolver that -// has a fixed set of provider factories provided by the caller. The returned -// resolver ignores version constraints entirely and just returns the given -// factory for each requested provider name. -// -// This function is primarily used in tests, to provide mock providers or -// in-process providers under test. -func ResourceProviderResolverFixed(factories map[addrs.Provider]ResourceProviderFactory) ResourceProviderResolver { - return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[addrs.Provider]ResourceProviderFactory, []error) { - ret := make(map[addrs.Provider]ResourceProviderFactory, len(reqd)) - var errs []error - for name := range reqd { - // Provider Source Readiness! - fqn := addrs.NewLegacyProvider(name) - if factory, exists := factories[fqn]; exists { - ret[fqn] = factory - } else { - errs = append(errs, fmt.Errorf("provider %q is not available", name)) - } - } - return ret, errs - }) -} - -// ResourceProviderFactory is a function type that creates a new instance -// of a resource provider. -type ResourceProviderFactory func() (ResourceProvider, error) - -// ResourceProviderFactoryFixed is a helper that creates a -// ResourceProviderFactory that just returns some fixed provider. -func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { - return func() (ResourceProvider, error) { - return p, nil - } -} - -func ProviderHasResource(p ResourceProvider, n string) bool { - for _, rt := range p.Resources() { - if rt.Name == n { - return true - } - } - - return false -} - -func ProviderHasDataSource(p ResourceProvider, n string) bool { - for _, rt := range p.DataSources() { - if rt.Name == n { - return true - } - } - - return false -} - -// resourceProviderFactories matches available plugins to the given version -// requirements to produce a map of compatible provider plugins if possible, -// or an error if the currently-available plugins are insufficient. 
-// -// This should be called only with configurations that have passed calls -// to config.Validate(), which ensures that all of the given version -// constraints are valid. It will panic if any invalid constraints are present. -func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[addrs.Provider]providers.Factory, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - ret, errs := resolver.ResolveProviders(reqd) - if errs != nil { - diags = diags.Append( - tfdiags.Sourceless(tfdiags.Error, - "Could not satisfy plugin requirements", - errPluginInit, - ), - ) - - for _, err := range errs { - diags = diags.Append(err) - } - - return nil, diags - } - - return ret, nil -} - -const errPluginInit = ` -Plugin reinitialization required. Please run "terraform init". - -Plugins are external binaries that Terraform uses to access and manipulate -resources. The configuration provided requires plugins which can't be located, -don't satisfy the version constraints, or are otherwise incompatible. - -Terraform automatically discovers provider requirements from your -configuration, including providers used in child modules. To see the -requirements and constraints from each module, run "terraform providers". -` diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go deleted file mode 100644 index 4000e3d2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ /dev/null @@ -1,315 +0,0 @@ -package terraform - -import ( - "sync" -) - -// MockResourceProvider implements ResourceProvider but mocks out all the -// calls for testing purposes. -type MockResourceProvider struct { - sync.Mutex - - // Anything you want, in case you need to store extra data with the mock. 
- Meta interface{} - - CloseCalled bool - CloseError error - GetSchemaCalled bool - GetSchemaRequest *ProviderSchemaRequest - GetSchemaReturn *ProviderSchema - GetSchemaReturnError error - InputCalled bool - InputInput UIInput - InputConfig *ResourceConfig - InputReturnConfig *ResourceConfig - InputReturnError error - InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) - ApplyCalled bool - ApplyInfo *InstanceInfo - ApplyState *InstanceState - ApplyDiff *InstanceDiff - ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) - ApplyReturn *InstanceState - ApplyReturnError error - ConfigureCalled bool - ConfigureConfig *ResourceConfig - ConfigureFn func(*ResourceConfig) error - ConfigureReturnError error - DiffCalled bool - DiffInfo *InstanceInfo - DiffState *InstanceState - DiffDesired *ResourceConfig - DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) - DiffReturn *InstanceDiff - DiffReturnError error - RefreshCalled bool - RefreshInfo *InstanceInfo - RefreshState *InstanceState - RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) - RefreshReturn *InstanceState - RefreshReturnError error - ResourcesCalled bool - ResourcesReturn []ResourceType - ReadDataApplyCalled bool - ReadDataApplyInfo *InstanceInfo - ReadDataApplyDiff *InstanceDiff - ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) - ReadDataApplyReturn *InstanceState - ReadDataApplyReturnError error - ReadDataDiffCalled bool - ReadDataDiffInfo *InstanceInfo - ReadDataDiffDesired *ResourceConfig - ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - ReadDataDiffReturn *InstanceDiff - ReadDataDiffReturnError error - StopCalled bool - StopFn func() error - StopReturnError error - DataSourcesCalled bool - DataSourcesReturn []DataSource - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(*ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateResourceCalled bool - ValidateResourceType string - ValidateResourceConfig *ResourceConfig - ValidateResourceReturnWarns []string - ValidateResourceReturnErrors []error - ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) - ValidateDataSourceCalled bool - ValidateDataSourceType string - ValidateDataSourceConfig *ResourceConfig - ValidateDataSourceReturnWarns []string - ValidateDataSourceReturnErrors []error - - ImportStateCalled bool - ImportStateInfo *InstanceInfo - ImportStateID string - ImportStateReturn []*InstanceState - ImportStateReturnError error - ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) -} - -func (p *MockResourceProvider) Close() error { - p.CloseCalled = true - return p.CloseError -} - -func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { - p.Lock() - defer p.Unlock() - - p.GetSchemaCalled = true - p.GetSchemaRequest = req - return p.GetSchemaReturn, p.GetSchemaReturnError -} - -func (p *MockResourceProvider) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - p.Lock() - defer p.Unlock() - p.InputCalled = true - p.InputInput = input - p.InputConfig = c - if p.InputFn != nil { - return p.InputFn(input, c) - } - return p.InputReturnConfig, p.InputReturnError -} - -func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - 
p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateResourceCalled = true - p.ValidateResourceType = t - p.ValidateResourceConfig = c - - if p.ValidateResourceFn != nil { - return p.ValidateResourceFn(t, c) - } - - return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors -} - -func (p *MockResourceProvider) Configure(c *ResourceConfig) error { - p.Lock() - defer p.Unlock() - - p.ConfigureCalled = true - p.ConfigureConfig = c - - if p.ConfigureFn != nil { - return p.ConfigureFn(c) - } - - return p.ConfigureReturnError -} - -func (p *MockResourceProvider) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} - -func (p *MockResourceProvider) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // We only lock while writing data. Reading is fine - p.Lock() - p.ApplyCalled = true - p.ApplyInfo = info - p.ApplyState = state - p.ApplyDiff = diff - p.Unlock() - - if p.ApplyFn != nil { - return p.ApplyFn(info, state, diff) - } - - return p.ApplyReturn.DeepCopy(), p.ApplyReturnError -} - -func (p *MockResourceProvider) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.DiffCalled = true - p.DiffInfo = info - p.DiffState = state - p.DiffDesired = desired - - if p.DiffFn != nil { - return p.DiffFn(info, state, desired) - } - - return p.DiffReturn.DeepCopy(), p.DiffReturnError -} - -func (p *MockResourceProvider) Refresh( - info *InstanceInfo, - s *InstanceState) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.RefreshCalled = true - p.RefreshInfo = info - p.RefreshState = s - - if p.RefreshFn != nil { - return p.RefreshFn(info, s) - } - - return p.RefreshReturn.DeepCopy(), p.RefreshReturnError -} - -func (p *MockResourceProvider) Resources() []ResourceType { - p.Lock() - defer p.Unlock() - - p.ResourcesCalled = true - return p.ResourcesReturn -} - -func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ImportStateCalled = true - p.ImportStateInfo = info - p.ImportStateID = id - if p.ImportStateFn != nil { - return p.ImportStateFn(info, id) - } - - var result []*InstanceState - if p.ImportStateReturn != nil { - result = make([]*InstanceState, len(p.ImportStateReturn)) - for i, v := range p.ImportStateReturn { - result[i] = v.DeepCopy() - } - } - - return result, p.ImportStateReturnError -} - -func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateDataSourceCalled = true - p.ValidateDataSourceType = t - p.ValidateDataSourceConfig = c - - if p.ValidateDataSourceFn != nil { - return p.ValidateDataSourceFn(t, c) - } - - return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors -} - -func (p *MockResourceProvider) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataDiffCalled = true - p.ReadDataDiffInfo = info - p.ReadDataDiffDesired = desired - if p.ReadDataDiffFn != nil { - return p.ReadDataDiffFn(info, desired) - } - - return p.ReadDataDiffReturn.DeepCopy(), 
p.ReadDataDiffReturnError -} - -func (p *MockResourceProvider) ReadDataApply( - info *InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - p.Lock() - defer p.Unlock() - - p.ReadDataApplyCalled = true - p.ReadDataApplyInfo = info - p.ReadDataApplyDiff = d - - if p.ReadDataApplyFn != nil { - return p.ReadDataApplyFn(info, d) - } - - return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError -} - -func (p *MockResourceProvider) DataSources() []DataSource { - p.Lock() - defer p.Unlock() - - p.DataSourcesCalled = true - return p.DataSourcesReturn -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go deleted file mode 100644 index 2743dd7e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go +++ /dev/null @@ -1,70 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/provisioners" -) - -// ResourceProvisioner is an interface that must be implemented by any -// resource provisioner: the thing that initializes resources in -// a Terraform configuration. -type ResourceProvisioner interface { - // GetConfigSchema returns the schema for the provisioner type's main - // configuration block. This is called prior to Validate to enable some - // basic structural validation to be performed automatically and to allow - // the configuration to be properly extracted from potentially-ambiguous - // configuration file formats. - GetConfigSchema() (*configschema.Block, error) - - // Validate is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - Validate(*ResourceConfig) ([]string, []error) - - // Apply runs the provisioner on a specific resource and returns the new - // resource state along with an error. Instead of a diff, the ResourceConfig - // is provided since provisioners only run after a resource has been - // newly created. - Apply(UIOutput, *InstanceState, *ResourceConfig) error - - // Stop is called when the provisioner should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). - // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. 
- Stop() error -} - -// ResourceProvisionerCloser is an interface that provisioners that can close -// connections that aren't needed anymore must implement. -type ResourceProvisionerCloser interface { - Close() error -} - -// ResourceProvisionerFactory is a function type that creates a new instance -// of a resource provisioner. -type ResourceProvisionerFactory func() (ResourceProvisioner, error) - -// ProvisionerFactory is a function type that creates a new instance -// of a provisioners.Interface. -type ProvisionerFactory = provisioners.Factory diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go deleted file mode 100644 index 7b88cf73..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go +++ /dev/null @@ -1,87 +0,0 @@ -package terraform - -import ( - "sync" - - "github.com/hashicorp/terraform/configs/configschema" -) - -// MockResourceProvisioner implements ResourceProvisioner but mocks out all the -// calls for testing purposes. -type MockResourceProvisioner struct { - sync.Mutex - // Anything you want, in case you need to store extra data with the mock. - Meta interface{} - - GetConfigSchemaCalled bool - GetConfigSchemaReturnSchema *configschema.Block - GetConfigSchemaReturnError error - - ApplyCalled bool - ApplyOutput UIOutput - ApplyState *InstanceState - ApplyConfig *ResourceConfig - ApplyFn func(*InstanceState, *ResourceConfig) error - ApplyReturnError error - - ValidateCalled bool - ValidateConfig *ResourceConfig - ValidateFn func(c *ResourceConfig) ([]string, []error) - ValidateReturnWarns []string - ValidateReturnErrors []error - - StopCalled bool - StopFn func() error - StopReturnError error -} - -var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) - -func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { - p.GetConfigSchemaCalled = true - return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError -} - -func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { - p.Lock() - defer p.Unlock() - - p.ValidateCalled = true - p.ValidateConfig = c - if p.ValidateFn != nil { - return p.ValidateFn(c) - } - return p.ValidateReturnWarns, p.ValidateReturnErrors -} - -func (p *MockResourceProvisioner) Apply( - output UIOutput, - state *InstanceState, - c *ResourceConfig) error { - p.Lock() - - p.ApplyCalled = true - p.ApplyOutput = output - p.ApplyState = state - p.ApplyConfig = c - if p.ApplyFn != nil { - fn := p.ApplyFn - p.Unlock() - return fn(state, c) - } - - defer p.Unlock() - return p.ApplyReturnError -} - -func (p *MockResourceProvisioner) Stop() error { - p.Lock() - defer p.Unlock() - - p.StopCalled = true - if p.StopFn != nil { - return p.StopFn() - } - - return p.StopReturnError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go deleted file mode 100644 index 62991c82..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/schemas.go +++ /dev/null @@ -1,278 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// Schemas is a container for various kinds of schema that Terraform needs -// 
during processing. -type Schemas struct { - Providers map[string]*ProviderSchema - Provisioners map[string]*configschema.Block -} - -// ProviderSchema returns the entire ProviderSchema object that was produced -// by the plugin for the given provider, or nil if no such schema is available. -// -// It's usually better to go use the more precise methods offered by type -// Schemas to handle this detail automatically. -func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema { - if ss.Providers == nil { - return nil - } - return ss.Providers[typeName] -} - -// ProviderConfig returns the schema for the provider configuration of the -// given provider type, or nil if no such schema is available. -func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block { - ps := ss.ProviderSchema(typeName) - if ps == nil { - return nil - } - return ps.Provider -} - -// ResourceTypeConfig returns the schema for the configuration of a given -// resource type belonging to a given provider type, or nil of no such -// schema is available. -// -// In many cases the provider type is inferrable from the resource type name, -// but this is not always true because users can override the provider for -// a resource using the "provider" meta-argument. Therefore it's important to -// always pass the correct provider name, even though it many cases it feels -// redundant. -func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { - ps := ss.ProviderSchema(providerType) - if ps == nil || ps.ResourceTypes == nil { - return nil, 0 - } - return ps.SchemaForResourceType(resourceMode, resourceType) -} - -// ProvisionerConfig returns the schema for the configuration of a given -// provisioner, or nil of no such schema is available. -func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { - return ss.Provisioners[name] -} - -// LoadSchemas searches the given configuration, state and plan (any of which -// may be nil) for constructs that have an associated schema, requests the -// necessary schemas from the given component factory (which must _not_ be nil), -// and returns a single object representing all of the necessary schemas. -// -// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing -// errors across multiple separate objects. Errors here will usually indicate -// either misbehavior on the part of one of the providers or of the provider -// protocol itself. When returned with errors, the returned schemas object is -// still valid but may be incomplete. 
-func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { - schemas := &Schemas{ - Providers: map[string]*ProviderSchema{}, - Provisioners: map[string]*configschema.Block{}, - } - var diags tfdiags.Diagnostics - - newDiags := loadProviderSchemas(schemas.Providers, config, state, components) - diags = diags.Append(newDiags) - newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) - diags = diags.Append(newDiags) - - return schemas, diags.Err() -} - -func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(typeName string) { - if _, exists := schemas[typeName]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) - provider, err := components.ResourceProvider(typeName, "early/"+typeName) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[typeName] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err), - ) - return - } - defer func() { - provider.Close() - }() - - resp := provider.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[typeName] = &ProviderSchema{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()), - ) - return - } - - s := &ProviderSchema{ - Provider: resp.Provider.Block, - ResourceTypes: make(map[string]*configschema.Block), - DataSources: make(map[string]*configschema.Block), - - ResourceTypeSchemaVersions: make(map[string]uint64), - } - - if resp.Provider.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. - diags = diags.Append( - fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName), - ) - } - - for t, r := range resp.ResourceTypes { - s.ResourceTypes[t] = r.Block - s.ResourceTypeSchemaVersions[t] = uint64(r.Version) - if r.Version < 0 { - diags = diags.Append( - fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName), - ) - } - } - - for t, d := range resp.DataSources { - s.DataSources[t] = d.Block - if d.Version < 0 { - // We're not using the version numbers here yet, but we'll check - // for validity anyway in case we start using them in future. 
- diags = diags.Append( - fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName), - ) - } - } - - schemas[typeName] = s - } - - if config != nil { - for _, typeName := range config.ProviderTypes() { - ensure(typeName) - } - } - - if state != nil { - needed := providers.AddressedTypesAbs(state.ProviderAddrs()) - for _, typeName := range needed { - ensure(typeName) - } - } - - return diags -} - -func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - ensure := func(name string) { - if _, exists := schemas[name]; exists { - return - } - - log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) - provisioner, err := components.ResourceProvisioner(name, "early/"+name) - if err != nil { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), - ) - return - } - defer func() { - if closer, ok := provisioner.(ResourceProvisionerCloser); ok { - closer.Close() - } - }() - - resp := provisioner.GetSchema() - if resp.Diagnostics.HasErrors() { - // We'll put a stub in the map so we won't re-attempt this on - // future calls. - schemas[name] = &configschema.Block{} - diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), - ) - return - } - - schemas[name] = resp.Provisioner - } - - if config != nil { - for _, rc := range config.Module.ManagedResources { - for _, pc := range rc.Managed.Provisioners { - ensure(pc.Type) - } - } - - // Must also visit our child modules, recursively. - for _, cc := range config.Children { - childDiags := loadProvisionerSchemas(schemas, cc, components) - diags = diags.Append(childDiags) - } - } - - return diags -} - -// ProviderSchema represents the schema for a provider's own configuration -// and the configuration for some or all of its resources and data sources. -// -// The completeness of this structure depends on how it was constructed. -// When constructed for a configuration, it will generally include only -// resource types and data sources used by that configuration. -type ProviderSchema struct { - Provider *configschema.Block - ResourceTypes map[string]*configschema.Block - DataSources map[string]*configschema.Block - - ResourceTypeSchemaVersions map[string]uint64 -} - -// SchemaForResourceType attempts to find a schema for the given mode and type. -// Returns nil if no such schema is available. -func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { - switch mode { - case addrs.ManagedResourceMode: - return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] - case addrs.DataResourceMode: - // Data resources don't have schema versions right now, since state is discarded for each refresh - return ps.DataSources[typeName], 0 - default: - // Shouldn't happen, because the above cases are comprehensive. - return nil, 0 - } -} - -// SchemaForResourceAddr attempts to find a schema for the mode and type from -// the given resource address. Returns nil if no such schema is available. 
-func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { - return ps.SchemaForResourceType(addr.Mode, addr.Type) -} - -// ProviderSchemaRequest is used to describe to a ResourceProvider which -// aspects of schema are required, when calling the GetSchema method. -type ProviderSchemaRequest struct { - ResourceTypes []string - DataSources []string -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go deleted file mode 100644 index 2f97e5af..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ /dev/null @@ -1,2253 +0,0 @@ -package terraform - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - uuid "github.com/hashicorp/go-uuid" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/tfdiags" - tfversion "github.com/hashicorp/terraform/version" - "github.com/mitchellh/copystructure" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" -) - -const ( - // StateVersion is the current version for our state file - StateVersion = 3 -) - -// rootModulePath is the path of the root module -var rootModulePath = []string{"root"} - -// normalizeModulePath transforms a legacy module path (which may or may not -// have a redundant "root" label at the start of it) into an -// addrs.ModuleInstance representing the same module. -// -// For legacy reasons, different parts of Terraform disagree about whether the -// root module has the path []string{} or []string{"root"}, and so this -// function accepts both and trims off the "root". An implication of this is -// that it's not possible to actually have a module call in the root module -// that is itself named "root", since that would be ambiguous. -// -// normalizeModulePath takes a raw module path and returns a path that -// has the rootModulePath prepended to it. If I could go back in time I -// would've never had a rootModulePath (empty path would be root). We can -// still fix this but thats a big refactor that my branch doesn't make sense -// for. Instead, this function normalizes paths. -func normalizeModulePath(p []string) addrs.ModuleInstance { - // FIXME: Remove this once everyone is using addrs.ModuleInstance. - - if len(p) > 0 && p[0] == "root" { - p = p[1:] - } - - ret := make(addrs.ModuleInstance, len(p)) - for i, name := range p { - // For now we don't actually support modules with multiple instances - // identified by keys, so we just treat every path element as a - // step with no key. - ret[i] = addrs.ModuleInstanceStep{ - Name: name, - } - } - return ret -} - -// State keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -type State struct { - // Version is the state file protocol version. - Version int `json:"version"` - - // TFVersion is the version of Terraform that wrote this state. 
- TFVersion string `json:"terraform_version,omitempty"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Lineage is set when a new, blank state is created and then - // never updated. This allows us to determine whether the serials - // of two states can be meaningfully compared. - // Apart from the guarantee that collisions between two lineages - // are very unlikely, this value is opaque and external callers - // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *RemoteState `json:"remote,omitempty"` - - // Backend tracks the configuration for the backend in use with - // this state. This is used to track any changes in the backend - // configuration. - Backend *BackendState `json:"backend,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*ModuleState `json:"modules"` - - mu sync.Mutex -} - -func (s *State) Lock() { s.mu.Lock() } -func (s *State) Unlock() { s.mu.Unlock() } - -// NewState is used to initialize a blank state -func NewState() *State { - s := &State{} - s.init() - return s -} - -// Children returns the ModuleStates that are direct children of -// the given path. If the path is "root", for example, then children -// returned might be "root.child", but not "root.child.grandchild". -func (s *State) Children(path []string) []*ModuleState { - s.Lock() - defer s.Unlock() - // TODO: test - - return s.children(path) -} - -func (s *State) children(path []string) []*ModuleState { - result := make([]*ModuleState, 0) - for _, m := range s.Modules { - if m == nil { - continue - } - - if len(m.Path) != len(path)+1 { - continue - } - if !reflect.DeepEqual(path, m.Path[:len(path)]) { - continue - } - - result = append(result, m) - } - - return result -} - -// AddModule adds the module with the given path to the state. -// -// This should be the preferred method to add module states since it -// allows us to optimize lookups later as well as control sorting. -func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { - s.Lock() - defer s.Unlock() - - return s.addModule(path) -} - -func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { - // check if the module exists first - m := s.moduleByPath(path) - if m != nil { - return m - } - - // Lower the new-style address into a legacy-style address. - // This requires that none of the steps have instance keys, which is - // true for all addresses at the time of implementing this because - // "count" and "for_each" are not yet implemented for modules. - // For the purposes of state, the legacy address format also includes - // a redundant extra prefix element "root". It is important to include - // this because the "prune" method will remove any module that has a - // path length less than one, and other parts of the state code will - // trim off the first element indiscriminately. - legacyPath := make([]string, len(path)+1) - legacyPath[0] = "root" - for i, step := range path { - if step.InstanceKey != addrs.NoKey { - // FIXME: Once the rest of Terraform is ready to use count and - // for_each, remove all of this and just write the addrs.ModuleInstance - // value itself into the ModuleState. 
- panic("state cannot represent modules with count or for_each keys") - } - - legacyPath[i+1] = step.Name - } - - m = &ModuleState{Path: legacyPath} - m.init() - s.Modules = append(s.Modules, m) - s.sort() - return m -} - -// ModuleByPath is used to lookup the module state for the given path. -// This should be the preferred lookup mechanism as it allows for future -// lookup optimizations. -func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { - if s == nil { - return nil - } - s.Lock() - defer s.Unlock() - - return s.moduleByPath(path) -} - -func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { - for _, mod := range s.Modules { - if mod == nil { - continue - } - if mod.Path == nil { - panic("missing module path") - } - modPath := normalizeModulePath(mod.Path) - if modPath.String() == path.String() { - return mod - } - } - return nil -} - -// Empty returns true if the state is empty. -func (s *State) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return len(s.Modules) == 0 -} - -// HasResources returns true if the state contains any resources. -// -// This is similar to !s.Empty, but returns true also in the case where the -// state has modules but all of them are devoid of resources. -func (s *State) HasResources() bool { - if s.Empty() { - return false - } - - for _, mod := range s.Modules { - if len(mod.Resources) > 0 { - return true - } - } - - return false -} - -// IsRemote returns true if State represents a state that exists and is -// remote. -func (s *State) IsRemote() bool { - if s == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Remote == nil { - return false - } - if s.Remote.Type == "" { - return false - } - - return true -} - -// Validate validates the integrity of this state file. -// -// Certain properties of the statefile are expected by Terraform in order -// to behave properly. The core of Terraform will assume that once it -// receives a State structure that it has been validated. This validation -// check should be called to ensure that. -// -// If this returns an error, then the user should be notified. The error -// response will include detailed information on the nature of the error. -func (s *State) Validate() error { - s.Lock() - defer s.Unlock() - - var result error - - // !!!! FOR DEVELOPERS !!!! - // - // Any errors returned from this Validate function will BLOCK TERRAFORM - // from loading a state file. Therefore, this should only contain checks - // that are only resolvable through manual intervention. - // - // !!!! FOR DEVELOPERS !!!! - - // Make sure there are no duplicate module states. We open a new - // block here so we can use basic variable names and future validations - // can do the same. - { - found := make(map[string]struct{}) - for _, ms := range s.Modules { - if ms == nil { - continue - } - - key := strings.Join(ms.Path, ".") - if _, ok := found[key]; ok { - result = multierror.Append(result, fmt.Errorf( - strings.TrimSpace(stateValidateErrMultiModule), key)) - continue - } - - found[key] = struct{}{} - } - } - - return result -} - -// Remove removes the item in the state at the given address, returning -// any errors that may have occurred. -// -// If the address references a module state or resource, it will delete -// all children as well. To check what will be deleted, use a StateFilter -// first. 
-func (s *State) Remove(addr ...string) error { - s.Lock() - defer s.Unlock() - - // Filter out what we need to delete - filter := &StateFilter{State: s} - results, err := filter.Filter(addr...) - if err != nil { - return err - } - - // If we have no results, just exit early, we're not going to do anything. - // While what happens below is fairly fast, this is an important early - // exit since the prune below might modify the state more and we don't - // want to modify the state if we don't have to. - if len(results) == 0 { - return nil - } - - // Go through each result and grab what we need - removed := make(map[interface{}]struct{}) - for _, r := range results { - // Convert the path to our own type - path := append([]string{"root"}, r.Path...) - - // If we removed this already, then ignore - if _, ok := removed[r.Value]; ok { - continue - } - - // If we removed the parent already, then ignore - if r.Parent != nil { - if _, ok := removed[r.Parent.Value]; ok { - continue - } - } - - // Add this to the removed list - removed[r.Value] = struct{}{} - - switch v := r.Value.(type) { - case *ModuleState: - s.removeModule(path, v) - case *ResourceState: - s.removeResource(path, v) - case *InstanceState: - s.removeInstance(path, r.Parent.Value.(*ResourceState), v) - default: - return fmt.Errorf("unknown type to delete: %T", r.Value) - } - } - - // Prune since the removal functions often do the bare minimum to - // remove a thing and may leave around dangling empty modules, resources, - // etc. Prune will clean that all up. - s.prune() - - return nil -} - -func (s *State) removeModule(path []string, v *ModuleState) { - for i, m := range s.Modules { - if m == v { - s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil - return - } - } -} - -func (s *State) removeResource(path []string, v *ResourceState) { - // Get the module this resource lives in. If it doesn't exist, we're done. - mod := s.moduleByPath(normalizeModulePath(path)) - if mod == nil { - return - } - - // Find this resource. This is a O(N) lookup when if we had the key - // it could be O(1) but even with thousands of resources this shouldn't - // matter right now. We can easily up performance here when the time comes. - for k, r := range mod.Resources { - if r == v { - // Found it - delete(mod.Resources, k) - return - } - } -} - -func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { - // Go through the resource and find the instance that matches this - // (if any) and remove it. - - // Check primary - if r.Primary == v { - r.Primary = nil - return - } - - // Check lists - lists := [][]*InstanceState{r.Deposed} - for _, is := range lists { - for i, instance := range is { - if instance == v { - // Found it, remove it - is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil - - // Done - return - } - } - } -} - -// RootModule returns the ModuleState for the root module -func (s *State) RootModule() *ModuleState { - root := s.ModuleByPath(addrs.RootModuleInstance) - if root == nil { - panic("missing root module") - } - return root -} - -// Equal tests if one state is equal to another. 
-func (s *State) Equal(other *State) bool { - // If one is nil, we do a direct check - if s == nil || other == nil { - return s == other - } - - s.Lock() - defer s.Unlock() - return s.equal(other) -} - -func (s *State) equal(other *State) bool { - if s == nil || other == nil { - return s == other - } - - // If the versions are different, they're certainly not equal - if s.Version != other.Version { - return false - } - - // If any of the modules are not equal, then this state isn't equal - if len(s.Modules) != len(other.Modules) { - return false - } - for _, m := range s.Modules { - // This isn't very optimal currently but works. - otherM := other.moduleByPath(normalizeModulePath(m.Path)) - if otherM == nil { - return false - } - - // If they're not equal, then we're not equal! - if !m.Equal(otherM) { - return false - } - } - - return true -} - -// MarshalEqual is similar to Equal but provides a stronger definition of -// "equal", where two states are equal if and only if their serialized form -// is byte-for-byte identical. -// -// This is primarily useful for callers that are trying to save snapshots -// of state to persistent storage, allowing them to detect when a new -// snapshot must be taken. -// -// Note that the serial number and lineage are included in the serialized form, -// so it's the caller's responsibility to properly manage these attributes -// so that this method is only called on two states that have the same -// serial and lineage, unless detecting such differences is desired. -func (s *State) MarshalEqual(other *State) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - recvBuf := &bytes.Buffer{} - otherBuf := &bytes.Buffer{} - - err := WriteState(s, recvBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - err = WriteState(other, otherBuf) - if err != nil { - // should never happen, since we're writing to a buffer - panic(err) - } - - return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) -} - -type StateAgeComparison int - -const ( - StateAgeEqual StateAgeComparison = 0 - StateAgeReceiverNewer StateAgeComparison = 1 - StateAgeReceiverOlder StateAgeComparison = -1 -) - -// CompareAges compares one state with another for which is "older". -// -// This is a simple check using the state's serial, and is thus only as -// reliable as the serial itself. In the normal case, only one state -// exists for a given combination of lineage/serial, but Terraform -// does not guarantee this and so the result of this method should be -// used with care. -// -// Returns an integer that is negative if the receiver is older than -// the argument, positive if the converse, and zero if they are equal. -// An error is returned if the two states are not of the same lineage, -// in which case the integer returned has no meaning. 
-func (s *State) CompareAges(other *State) (StateAgeComparison, error) { - // nil states are "older" than actual states - switch { - case s != nil && other == nil: - return StateAgeReceiverNewer, nil - case s == nil && other != nil: - return StateAgeReceiverOlder, nil - case s == nil && other == nil: - return StateAgeEqual, nil - } - - if !s.SameLineage(other) { - return StateAgeEqual, fmt.Errorf( - "can't compare two states of differing lineage", - ) - } - - s.Lock() - defer s.Unlock() - - switch { - case s.Serial < other.Serial: - return StateAgeReceiverOlder, nil - case s.Serial > other.Serial: - return StateAgeReceiverNewer, nil - default: - return StateAgeEqual, nil - } -} - -// SameLineage returns true only if the state given in argument belongs -// to the same "lineage" of states as the receiver. -func (s *State) SameLineage(other *State) bool { - s.Lock() - defer s.Unlock() - - // If one of the states has no lineage then it is assumed to predate - // this concept, and so we'll accept it as belonging to any lineage - // so that a lineage string can be assigned to newer versions - // without breaking compatibility with older versions. - if s.Lineage == "" || other.Lineage == "" { - return true - } - - return s.Lineage == other.Lineage -} - -// DeepCopy performs a deep copy of the state structure and returns -// a new structure. -func (s *State) DeepCopy() *State { - if s == nil { - return nil - } - - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*State) -} - -// FromFutureTerraform checks if this state was written by a Terraform -// version from the future. -func (s *State) FromFutureTerraform() bool { - s.Lock() - defer s.Unlock() - - // No TF version means it is certainly from the past - if s.TFVersion == "" { - return false - } - - v := version.Must(version.NewVersion(s.TFVersion)) - return tfversion.SemVer.LessThan(v) -} - -func (s *State) Init() { - s.Lock() - defer s.Unlock() - s.init() -} - -func (s *State) init() { - if s.Version == 0 { - s.Version = StateVersion - } - - if s.moduleByPath(addrs.RootModuleInstance) == nil { - s.addModule(addrs.RootModuleInstance) - } - s.ensureHasLineage() - - for _, mod := range s.Modules { - if mod != nil { - mod.init() - } - } - - if s.Remote != nil { - s.Remote.init() - } - -} - -func (s *State) EnsureHasLineage() { - s.Lock() - defer s.Unlock() - - s.ensureHasLineage() -} - -func (s *State) ensureHasLineage() { - if s.Lineage == "" { - lineage, err := uuid.GenerateUUID() - if err != nil { - panic(fmt.Errorf("Failed to generate lineage: %v", err)) - } - s.Lineage = lineage - log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) - } else { - log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) - } -} - -// AddModuleState insert this module state and override any existing ModuleState -func (s *State) AddModuleState(mod *ModuleState) { - mod.init() - s.Lock() - defer s.Unlock() - - s.addModuleState(mod) -} - -func (s *State) addModuleState(mod *ModuleState) { - for i, m := range s.Modules { - if reflect.DeepEqual(m.Path, mod.Path) { - s.Modules[i] = mod - return - } - } - - s.Modules = append(s.Modules, mod) - s.sort() -} - -// prune is used to remove any resources that are no longer required -func (s *State) prune() { - if s == nil { - return - } - - // Filter out empty modules. - // A module is always assumed to have a path, and it's length isn't always - // bounds checked later on. 
Modules may be "emptied" during destroy, but we - // never want to store those in the state. - for i := 0; i < len(s.Modules); i++ { - if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { - s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) - i-- - } - } - - for _, mod := range s.Modules { - mod.prune() - } - if s.Remote != nil && s.Remote.Empty() { - s.Remote = nil - } -} - -// sort sorts the modules -func (s *State) sort() { - sort.Sort(moduleStateSort(s.Modules)) - - // Allow modules to be sorted - for _, m := range s.Modules { - if m != nil { - m.sort() - } - } -} - -func (s *State) String() string { - if s == nil { - return "" - } - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - for _, m := range s.Modules { - mStr := m.String() - - // If we're the root module, we just write the output directly. - if reflect.DeepEqual(m.Path, rootModulePath) { - buf.WriteString(mStr + "\n") - continue - } - - buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) - - s := bufio.NewScanner(strings.NewReader(mStr)) - for s.Scan() { - text := s.Text() - if text != "" { - text = " " + text - } - - buf.WriteString(fmt.Sprintf("%s\n", text)) - } - } - - return strings.TrimSpace(buf.String()) -} - -// BackendState stores the configuration to connect to a remote backend. -type BackendState struct { - Type string `json:"type"` // Backend type - ConfigRaw json.RawMessage `json:"config"` // Backend raw config - Hash uint64 `json:"hash"` // Hash of portion of configuration from config files -} - -// Empty returns true if BackendState has no state. -func (s *BackendState) Empty() bool { - return s == nil || s.Type == "" -} - -// Config decodes the type-specific configuration object using the provided -// schema and returns the result as a cty.Value. -// -// An error is returned if the stored configuration does not conform to the -// given schema. -func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { - ty := schema.ImpliedType() - if s == nil { - return cty.NullVal(ty), nil - } - return ctyjson.Unmarshal(s.ConfigRaw, ty) -} - -// SetConfig replaces (in-place) the type-specific configuration object using -// the provided value and associated schema. -// -// An error is returned if the given value does not conform to the implied -// type of the schema. -func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { - ty := schema.ImpliedType() - buf, err := ctyjson.Marshal(val, ty) - if err != nil { - return err - } - s.ConfigRaw = buf - return nil -} - -// ForPlan produces an alternative representation of the reciever that is -// suitable for storing in a plan. The current workspace must additionally -// be provided, to be stored alongside the backend configuration. -// -// The backend configuration schema is required in order to properly -// encode the backend-specific configuration settings. -func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { - if s == nil { - return nil, nil - } - - configVal, err := s.Config(schema) - if err != nil { - return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) - } - return plans.NewBackend(s.Type, configVal, schema, workspaceName) -} - -// RemoteState is used to track the information about a remote -// state store that we push/pull state to. 
-type RemoteState struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` - - mu sync.Mutex -} - -func (s *RemoteState) Lock() { s.mu.Lock() } -func (s *RemoteState) Unlock() { s.mu.Unlock() } - -func (r *RemoteState) init() { - r.Lock() - defer r.Unlock() - - if r.Config == nil { - r.Config = make(map[string]string) - } -} - -func (r *RemoteState) deepcopy() *RemoteState { - r.Lock() - defer r.Unlock() - - confCopy := make(map[string]string, len(r.Config)) - for k, v := range r.Config { - confCopy[k] = v - } - return &RemoteState{ - Type: r.Type, - Config: confCopy, - } -} - -func (r *RemoteState) Empty() bool { - if r == nil { - return true - } - r.Lock() - defer r.Unlock() - - return r.Type == "" -} - -func (r *RemoteState) Equals(other *RemoteState) bool { - r.Lock() - defer r.Unlock() - - if r.Type != other.Type { - return false - } - if len(r.Config) != len(other.Config) { - return false - } - for k, v := range r.Config { - if other.Config[k] != v { - return false - } - } - return true -} - -// OutputState is used to track the state relevant to a single output. -type OutputState struct { - // Sensitive describes whether the output is considered sensitive, - // which may lead to masking the value on screen in some cases. - Sensitive bool `json:"sensitive"` - // Type describes the structure of Value. Valid values are "string", - // "map" and "list" - Type string `json:"type"` - // Value contains the value of the output, in the structure described - // by the Type field. - Value interface{} `json:"value"` - - mu sync.Mutex -} - -func (s *OutputState) Lock() { s.mu.Lock() } -func (s *OutputState) Unlock() { s.mu.Unlock() } - -func (s *OutputState) String() string { - return fmt.Sprintf("%#v", s.Value) -} - -// Equal compares two OutputState structures for equality. nil values are -// considered equal. -func (s *OutputState) Equal(other *OutputState) bool { - if s == nil && other == nil { - return true - } - - if s == nil || other == nil { - return false - } - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Sensitive != other.Sensitive { - return false - } - - if !reflect.DeepEqual(s.Value, other.Value) { - return false - } - - return true -} - -func (s *OutputState) deepcopy() *OutputState { - if s == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(fmt.Errorf("Error copying output value: %s", err)) - } - - return stateCopy.(*OutputState) -} - -// ModuleState is used to track all the state relevant to a single -// module. Previous to Terraform 0.3, all state belonged to the "root" -// module. -type ModuleState struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Locals are kept only transiently in-memory, because we can always - // re-compute them. - Locals map[string]interface{} `json:"-"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]*OutputState `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. 
Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*ResourceState `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - mu sync.Mutex -} - -func (s *ModuleState) Lock() { s.mu.Lock() } -func (s *ModuleState) Unlock() { s.mu.Unlock() } - -// Equal tests whether one module state is equal to another. -func (m *ModuleState) Equal(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - // Paths must be equal - if !reflect.DeepEqual(m.Path, other.Path) { - return false - } - - // Outputs must be equal - if len(m.Outputs) != len(other.Outputs) { - return false - } - for k, v := range m.Outputs { - if !other.Outputs[k].Equal(v) { - return false - } - } - - // Dependencies must be equal. This sorts these in place but - // this shouldn't cause any problems. - sort.Strings(m.Dependencies) - sort.Strings(other.Dependencies) - if len(m.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range m.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // Resources must be equal - if len(m.Resources) != len(other.Resources) { - return false - } - for k, r := range m.Resources { - otherR, ok := other.Resources[k] - if !ok { - return false - } - - if !r.Equal(otherR) { - return false - } - } - - return true -} - -// IsRoot says whether or not this module diff is for the root module. -func (m *ModuleState) IsRoot() bool { - m.Lock() - defer m.Unlock() - return reflect.DeepEqual(m.Path, rootModulePath) -} - -// IsDescendent returns true if other is a descendent of this module. -func (m *ModuleState) IsDescendent(other *ModuleState) bool { - m.Lock() - defer m.Unlock() - - i := len(m.Path) - return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) -} - -// Orphans returns a list of keys of resources that are in the State -// but aren't present in the configuration itself. Hence, these keys -// represent the state of resources that are orphans. -func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { - m.Lock() - defer m.Unlock() - - inConfig := make(map[string]struct{}) - if c != nil { - for _, r := range c.ManagedResources { - inConfig[r.Addr().String()] = struct{}{} - } - for _, r := range c.DataResources { - inConfig[r.Addr().String()] = struct{}{} - } - } - - var result []addrs.ResourceInstance - for k := range m.Resources { - // Since we've not yet updated state to use our new address format, - // we need to do some shimming here. - legacyAddr, err := parseResourceAddressInternal(k) - if err != nil { - // Suggests that the user tampered with the state, since we always - // generate valid internal addresses. - log.Printf("ModuleState has invalid resource key %q. 
Ignoring.", k) - continue - } - - addr := legacyAddr.AbsResourceInstanceAddr().Resource - compareKey := addr.Resource.String() // compare by resource address, ignoring instance key - if _, exists := inConfig[compareKey]; !exists { - result = append(result, addr) - } - } - return result -} - -// RemovedOutputs returns a list of outputs that are in the State but aren't -// present in the configuration itself. -func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { - if outputs == nil { - // If we got no output map at all then we'll just treat our set of - // configured outputs as empty, since that suggests that they've all - // been removed by removing their containing module. - outputs = make(map[string]*configs.Output) - } - - s.Lock() - defer s.Unlock() - - var ret []addrs.OutputValue - for n := range s.Outputs { - if _, declared := outputs[n]; !declared { - ret = append(ret, addrs.OutputValue{ - Name: n, - }) - } - } - - return ret -} - -// View returns a view with the given resource prefix. -func (m *ModuleState) View(id string) *ModuleState { - if m == nil { - return m - } - - r := m.deepcopy() - for k, _ := range r.Resources { - if id == k || strings.HasPrefix(k, id+".") { - continue - } - - delete(r.Resources, k) - } - - return r -} - -func (m *ModuleState) init() { - m.Lock() - defer m.Unlock() - - if m.Path == nil { - m.Path = []string{} - } - if m.Outputs == nil { - m.Outputs = make(map[string]*OutputState) - } - if m.Resources == nil { - m.Resources = make(map[string]*ResourceState) - } - - if m.Dependencies == nil { - m.Dependencies = make([]string, 0) - } - - for _, rs := range m.Resources { - rs.init() - } -} - -func (m *ModuleState) deepcopy() *ModuleState { - if m == nil { - return nil - } - - stateCopy, err := copystructure.Config{Lock: true}.Copy(m) - if err != nil { - panic(err) - } - - return stateCopy.(*ModuleState) -} - -// prune is used to remove any resources that are no longer required -func (m *ModuleState) prune() { - m.Lock() - defer m.Unlock() - - for k, v := range m.Resources { - if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { - delete(m.Resources, k) - continue - } - - v.prune() - } - - for k, v := range m.Outputs { - if v.Value == hcl2shim.UnknownVariableValue { - delete(m.Outputs, k) - } - } - - m.Dependencies = uniqueStrings(m.Dependencies) -} - -func (m *ModuleState) sort() { - for _, v := range m.Resources { - v.sort() - } -} - -func (m *ModuleState) String() string { - m.Lock() - defer m.Unlock() - - var buf bytes.Buffer - - if len(m.Resources) == 0 { - buf.WriteString("") - } - - names := make([]string, 0, len(m.Resources)) - for name, _ := range m.Resources { - names = append(names, name) - } - - sort.Sort(resourceNameSort(names)) - - for _, k := range names { - rs := m.Resources[k] - var id string - if rs.Primary != nil { - id = rs.Primary.ID - } - if id == "" { - id = "" - } - - taintStr := "" - if rs.Primary.Tainted { - taintStr = " (tainted)" - } - - deposedStr := "" - if len(rs.Deposed) > 0 { - deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) - } - - buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) - buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) - if rs.Provider != "" { - buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) - } - - var attributes map[string]string - if rs.Primary != nil { - attributes = rs.Primary.Attributes - } - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue 
- } - - attrKeys = append(attrKeys, ak) - } - - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) - } - - for idx, t := range rs.Deposed { - taintStr := "" - if t.Tainted { - taintStr = " (tainted)" - } - buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) - } - - if len(rs.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) - for _, dep := range rs.Dependencies { - buf.WriteString(fmt.Sprintf(" %s\n", dep)) - } - } - } - - if len(m.Outputs) > 0 { - buf.WriteString("\nOutputs:\n\n") - - ks := make([]string, 0, len(m.Outputs)) - for k, _ := range m.Outputs { - ks = append(ks, k) - } - - sort.Strings(ks) - - for _, k := range ks { - v := m.Outputs[k] - switch vTyped := v.Value.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case []interface{}: - buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) - case map[string]interface{}: - var mapKeys []string - for key, _ := range vTyped { - mapKeys = append(mapKeys, key) - } - sort.Strings(mapKeys) - - var mapBuf bytes.Buffer - mapBuf.WriteString("{") - for _, key := range mapKeys { - mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) - } - mapBuf.WriteString("}") - - buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) - } - } - } - - return buf.String() -} - -func (m *ModuleState) Empty() bool { - return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 -} - -// ResourceStateKey is a structured representation of the key used for the -// ModuleState.Resources mapping -type ResourceStateKey struct { - Name string - Type string - Mode ResourceMode - Index int -} - -// Equal determines whether two ResourceStateKeys are the same -func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { - if rsk == nil || other == nil { - return false - } - if rsk.Mode != other.Mode { - return false - } - if rsk.Type != other.Type { - return false - } - if rsk.Name != other.Name { - return false - } - if rsk.Index != other.Index { - return false - } - return true -} - -func (rsk *ResourceStateKey) String() string { - if rsk == nil { - return "" - } - var prefix string - switch rsk.Mode { - case ManagedResourceMode: - prefix = "" - case DataResourceMode: - prefix = "data." - default: - panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) - } - if rsk.Index == -1 { - return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) - } - return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) -} - -// ParseResourceStateKey accepts a key in the format used by -// ModuleState.Resources and returns a resource name and resource index. In the -// state, a resource has the format "type.name.index" or "type.name". In the -// latter case, the index is returned as -1. -func ParseResourceStateKey(k string) (*ResourceStateKey, error) { - parts := strings.Split(k, ".") - mode := ManagedResourceMode - if len(parts) > 0 && parts[0] == "data" { - mode = DataResourceMode - // Don't need the constant "data" prefix for parsing - // now that we've figured out the mode. 
- parts = parts[1:] - } - if len(parts) < 2 || len(parts) > 3 { - return nil, fmt.Errorf("Malformed resource state key: %s", k) - } - rsk := &ResourceStateKey{ - Mode: mode, - Type: parts[0], - Name: parts[1], - Index: -1, - } - if len(parts) == 3 { - index, err := strconv.Atoi(parts[2]) - if err != nil { - return nil, fmt.Errorf("Malformed resource state key index: %s", k) - } - rsk.Index = index - } - return rsk, nil -} - -// ResourceState holds the state of a resource that is used so that -// a provider can find and manage an existing resource as well as for -// storing attributes that are used to populate variables of child -// resources. -// -// Attributes has attributes about the created resource that are -// queryable in interpolation: "${type.id.attr}" -// -// Extra is just extra data that a provider can return that we store -// for later, but is not exposed in any way to the user. -// -type ResourceState struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *InstanceState `json:"primary"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. - // - // If there were problems creating the replacement Primary, the Deposed - // instance and the (now tainted) replacement Primary will be swapped so the - // tainted replacement will be cleaned up instead. - // - // An instance will remain in the Deposed list until it is successfully - // destroyed and purged. - Deposed []*InstanceState `json:"deposed"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider"` - - mu sync.Mutex -} - -func (s *ResourceState) Lock() { s.mu.Lock() } -func (s *ResourceState) Unlock() { s.mu.Unlock() } - -// Equal tests whether two ResourceStates are equal. 
-func (s *ResourceState) Equal(other *ResourceState) bool { - s.Lock() - defer s.Unlock() - - if s.Type != other.Type { - return false - } - - if s.Provider != other.Provider { - return false - } - - // Dependencies must be equal - sort.Strings(s.Dependencies) - sort.Strings(other.Dependencies) - if len(s.Dependencies) != len(other.Dependencies) { - return false - } - for i, d := range s.Dependencies { - if other.Dependencies[i] != d { - return false - } - } - - // States must be equal - if !s.Primary.Equal(other.Primary) { - return false - } - - return true -} - -// Taint marks a resource as tainted. -func (s *ResourceState) Taint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = true - } -} - -// Untaint unmarks a resource as tainted. -func (s *ResourceState) Untaint() { - s.Lock() - defer s.Unlock() - - if s.Primary != nil { - s.Primary.Tainted = false - } -} - -// ProviderAddr returns the provider address for the receiver, by parsing the -// string representation saved in state. An error can be returned if the -// value in state is corrupt. -func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { - var diags tfdiags.Diagnostics - - str := s.Provider - traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(travDiags) - if travDiags.HasErrors() { - return addrs.AbsProviderConfig{}, diags.Err() - } - - addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) - diags = diags.Append(addrDiags) - return addr, diags.Err() -} - -func (s *ResourceState) init() { - s.Lock() - defer s.Unlock() - - if s.Primary == nil { - s.Primary = &InstanceState{} - } - s.Primary.init() - - if s.Dependencies == nil { - s.Dependencies = []string{} - } - - if s.Deposed == nil { - s.Deposed = make([]*InstanceState, 0) - } -} - -func (s *ResourceState) deepcopy() *ResourceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*ResourceState) -} - -// prune is used to remove any instances that are no longer required -func (s *ResourceState) prune() { - s.Lock() - defer s.Unlock() - - n := len(s.Deposed) - for i := 0; i < n; i++ { - inst := s.Deposed[i] - if inst == nil || inst.ID == "" { - copy(s.Deposed[i:], s.Deposed[i+1:]) - s.Deposed[n-1] = nil - n-- - i-- - } - } - s.Deposed = s.Deposed[:n] - - s.Dependencies = uniqueStrings(s.Dependencies) -} - -func (s *ResourceState) sort() { - s.Lock() - defer s.Unlock() - - sort.Strings(s.Dependencies) -} - -func (s *ResourceState) String() string { - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) - return buf.String() -} - -// InstanceState is used to track the unique state information belonging -// to a given instance. -type InstanceState struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. 
- Ephemeral EphemeralState `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. The value here must only contain Go primitives - // and collections. - Meta map[string]interface{} `json:"meta"` - - // Tainted is used to mark a resource for recreation. - Tainted bool `json:"tainted"` - - mu sync.Mutex -} - -func (s *InstanceState) Lock() { s.mu.Lock() } -func (s *InstanceState) Unlock() { s.mu.Unlock() } - -func (s *InstanceState) init() { - s.Lock() - defer s.Unlock() - - if s.Attributes == nil { - s.Attributes = make(map[string]string) - } - if s.Meta == nil { - s.Meta = make(map[string]interface{}) - } - s.Ephemeral.init() -} - -// NewInstanceStateShimmedFromValue is a shim method to lower a new-style -// object value representing the attributes of an instance object into the -// legacy InstanceState representation. -// -// This is for shimming to old components only and should not be used in new code. -func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { - attrs := hcl2shim.FlatmapValueFromHCL2(state) - return &InstanceState{ - ID: attrs["id"], - Attributes: attrs, - Meta: map[string]interface{}{ - "schema_version": schemaVersion, - }, - } -} - -// AttrsAsObjectValue shims from the legacy InstanceState representation to -// a new-style cty object value representation of the state attributes, using -// the given type for guidance. -// -// The given type must be the implied type of the schema of the resource type -// of the object whose state is being converted, or the result is undefined. -// -// This is for shimming from old components only and should not be used in -// new code. -func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { - if s == nil { - // if the state is nil, we need to construct a complete cty.Value with - // null attributes, rather than a single cty.NullVal(ty) - s = &InstanceState{} - } - - if s.Attributes == nil { - s.Attributes = map[string]string{} - } - - // make sure ID is included in the attributes. The InstanceState.ID value - // takes precedence. 
- if s.ID != "" { - s.Attributes["id"] = s.ID - } - - return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) -} - -// Copy all the Fields from another InstanceState -func (s *InstanceState) Set(from *InstanceState) { - s.Lock() - defer s.Unlock() - - from.Lock() - defer from.Unlock() - - s.ID = from.ID - s.Attributes = from.Attributes - s.Ephemeral = from.Ephemeral - s.Meta = from.Meta - s.Tainted = from.Tainted -} - -func (s *InstanceState) DeepCopy() *InstanceState { - copy, err := copystructure.Config{Lock: true}.Copy(s) - if err != nil { - panic(err) - } - - return copy.(*InstanceState) -} - -func (s *InstanceState) Empty() bool { - if s == nil { - return true - } - s.Lock() - defer s.Unlock() - - return s.ID == "" -} - -func (s *InstanceState) Equal(other *InstanceState) bool { - // Short circuit some nil checks - if s == nil || other == nil { - return s == other - } - s.Lock() - defer s.Unlock() - - // IDs must be equal - if s.ID != other.ID { - return false - } - - // Attributes must be equal - if len(s.Attributes) != len(other.Attributes) { - return false - } - for k, v := range s.Attributes { - otherV, ok := other.Attributes[k] - if !ok { - return false - } - - if v != otherV { - return false - } - } - - // Meta must be equal - if len(s.Meta) != len(other.Meta) { - return false - } - if s.Meta != nil && other.Meta != nil { - // We only do the deep check if both are non-nil. If one is nil - // we treat it as equal since their lengths are both zero (check - // above). - // - // Since this can contain numeric values that may change types during - // serialization, let's compare the serialized values. - sMeta, err := json.Marshal(s.Meta) - if err != nil { - // marshaling primitives shouldn't ever error out - panic(err) - } - otherMeta, err := json.Marshal(other.Meta) - if err != nil { - panic(err) - } - - if !bytes.Equal(sMeta, otherMeta) { - return false - } - } - - if s.Tainted != other.Tainted { - return false - } - - return true -} - -// MergeDiff takes a ResourceDiff and merges the attributes into -// this resource state in order to generate a new state. This new -// state can be used to provide updated attribute lookups for -// variable interpolation. -// -// If the diff attribute requires computing the value, and hence -// won't be available until apply, the value is replaced with the -// computeID. 
-func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { - result := s.DeepCopy() - if result == nil { - result = new(InstanceState) - } - result.init() - - if s != nil { - s.Lock() - defer s.Unlock() - for k, v := range s.Attributes { - result.Attributes[k] = v - } - } - if d != nil { - for k, diff := range d.CopyAttributes() { - if diff.NewRemoved { - delete(result.Attributes, k) - continue - } - if diff.NewComputed { - result.Attributes[k] = hcl2shim.UnknownVariableValue - continue - } - - result.Attributes[k] = diff.New - } - } - - return result -} - -func (s *InstanceState) String() string { - notCreated := "" - - if s == nil { - return notCreated - } - - s.Lock() - defer s.Unlock() - - var buf bytes.Buffer - - if s.ID == "" { - return notCreated - } - - buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) - - attributes := s.Attributes - attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { - if ak == "id" { - continue - } - - attrKeys = append(attrKeys, ak) - } - sort.Strings(attrKeys) - - for _, ak := range attrKeys { - av := attributes[ak] - buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) - } - - buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) - - return buf.String() -} - -// EphemeralState is used for transient state that is only kept in-memory -type EphemeralState struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` - - // Type is used to specify the resource type for this instance. This is only - // required for import operations (as documented). If the documentation - // doesn't state that you need to set this, then don't worry about - // setting it. - Type string `json:"-"` -} - -func (e *EphemeralState) init() { - if e.ConnInfo == nil { - e.ConnInfo = make(map[string]string) - } -} - -func (e *EphemeralState) DeepCopy() *EphemeralState { - copy, err := copystructure.Config{Lock: true}.Copy(e) - if err != nil { - panic(err) - } - - return copy.(*EphemeralState) -} - -type jsonStateVersionIdentifier struct { - Version int `json:"version"` -} - -// Check if this is a V0 format - the magic bytes at the start of the file -// should be "tfstate" if so. We no longer support upgrading this type of -// state but return an error message explaining to a user how they can -// upgrade via the 0.6.x series. -func testForV0State(buf *bufio.Reader) error { - start, err := buf.Peek(len("tfstate")) - if err != nil { - return fmt.Errorf("Failed to check for magic bytes: %v", err) - } - if string(start) == "tfstate" { - return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + - "format which was used prior to Terraform 0.3. Please upgrade\n" + - "this state file using Terraform 0.6.16 prior to using it with\n" + - "Terraform 0.7.") - } - - return nil -} - -// ErrNoState is returned by ReadState when the io.Reader contains no data -var ErrNoState = errors.New("no state") - -// ReadState reads a state structure out of a reader in the format that -// was written by WriteState. -func ReadState(src io.Reader) (*State, error) { - // check for a nil file specifically, since that produces a platform - // specific error if we try to use it in a bufio.Reader. 
- if f, ok := src.(*os.File); ok && f == nil { - return nil, ErrNoState - } - - buf := bufio.NewReader(src) - - if _, err := buf.Peek(1); err != nil { - if err == io.EOF { - return nil, ErrNoState - } - return nil, err - } - - if err := testForV0State(buf); err != nil { - return nil, err - } - - // If we are JSON we buffer the whole thing in memory so we can read it twice. - // This is suboptimal, but will work for now. - jsonBytes, err := ioutil.ReadAll(buf) - if err != nil { - return nil, fmt.Errorf("Reading state file failed: %v", err) - } - - versionIdentifier := &jsonStateVersionIdentifier{} - if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { - return nil, fmt.Errorf("Decoding state file version failed: %v", err) - } - - var result *State - switch versionIdentifier.Version { - case 0: - return nil, fmt.Errorf("State version 0 is not supported as JSON.") - case 1: - v1State, err := ReadStateV1(jsonBytes) - if err != nil { - return nil, err - } - - v2State, err := upgradeStateV1ToV2(v1State) - if err != nil { - return nil, err - } - - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - // increment the Serial whenever we upgrade state - v3State.Serial++ - result = v3State - case 2: - v2State, err := ReadStateV2(jsonBytes) - if err != nil { - return nil, err - } - v3State, err := upgradeStateV2ToV3(v2State) - if err != nil { - return nil, err - } - - v3State.Serial++ - result = v3State - case 3: - v3State, err := ReadStateV3(jsonBytes) - if err != nil { - return nil, err - } - - result = v3State - default: - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), versionIdentifier.Version) - } - - // If we reached this place we must have a result set - if result == nil { - panic("resulting state in load not set, assertion failed") - } - - // Prune the state when read it. Its possible to write unpruned states or - // for a user to make a state unpruned (nil-ing a module state for example). - result.prune() - - // Validate the state file is valid - if err := result.Validate(); err != nil { - return nil, err - } - - return result, nil -} - -func ReadStateV1(jsonBytes []byte) (*stateV1, error) { - v1State := &stateV1{} - if err := json.Unmarshal(jsonBytes, v1State); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - if v1State.Version != 1 { - return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ - "read %d, expected 1", v1State.Version) - } - - return v1State, nil -} - -func ReadStateV2(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. 
Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any unitialized fields in the state - state.init() - - // Sort it - state.sort() - - return state, nil -} - -func ReadStateV3(jsonBytes []byte) (*State, error) { - state := &State{} - if err := json.Unmarshal(jsonBytes, state); err != nil { - return nil, fmt.Errorf("Decoding state file failed: %v", err) - } - - // Check the version, this to ensure we don't read a future - // version that we don't understand - if state.Version > StateVersion { - return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - tfversion.SemVer.String(), state.Version) - } - - // Make sure the version is semantic - if state.TFVersion != "" { - if _, err := version.NewVersion(state.TFVersion); err != nil { - return nil, fmt.Errorf( - "State contains invalid version: %s\n\n"+ - "Terraform validates the version format prior to writing it. This\n"+ - "means that this is invalid of the state becoming corrupted through\n"+ - "some external means. Please manually modify the Terraform version\n"+ - "field to be a proper semantic version.", - state.TFVersion) - } - } - - // catch any unitialized fields in the state - state.init() - - // Sort it - state.sort() - - // Now we write the state back out to detect any changes in normaliztion. - // If our state is now written out differently, bump the serial number to - // prevent conflicts. - var buf bytes.Buffer - err := WriteState(state, &buf) - if err != nil { - return nil, err - } - - if !bytes.Equal(jsonBytes, buf.Bytes()) { - log.Println("[INFO] state modified during read or write. incrementing serial number") - state.Serial++ - } - - return state, nil -} - -// WriteState writes a state somewhere in a binary format. -func WriteState(d *State, dst io.Writer) error { - // writing a nil state is a noop. - if d == nil { - return nil - } - - // make sure we have no uninitialized fields - d.init() - - // Make sure it is sorted - d.sort() - - // Ensure the version is set - d.Version = StateVersion - - // If the TFVersion is set, verify it. We used to just set the version - // here, but this isn't safe since it changes the MD5 sum on some remote - // state storage backends such as Atlas. We now leave it be if needed. - if d.TFVersion != "" { - if _, err := version.NewVersion(d.TFVersion); err != nil { - return fmt.Errorf( - "Error writing state, invalid version: %s\n\n"+ - "The Terraform version when writing the state must be a semantic\n"+ - "version.", - d.TFVersion) - } - } - - // Encode the data in a human-friendly way - data, err := json.MarshalIndent(d, "", " ") - if err != nil { - return fmt.Errorf("Failed to encode state: %s", err) - } - - // We append a newline to the data because MarshalIndent doesn't - data = append(data, '\n') - - // Write the data out to the dst - if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { - return fmt.Errorf("Failed to write state: %v", err) - } - - return nil -} - -// resourceNameSort implements the sort.Interface to sort name parts lexically for -// strings and numerically for integer indexes. 
-type resourceNameSort []string - -func (r resourceNameSort) Len() int { return len(r) } -func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } - -func (r resourceNameSort) Less(i, j int) bool { - iParts := strings.Split(r[i], ".") - jParts := strings.Split(r[j], ".") - - end := len(iParts) - if len(jParts) < end { - end = len(jParts) - } - - for idx := 0; idx < end; idx++ { - if iParts[idx] == jParts[idx] { - continue - } - - // sort on the first non-matching part - iInt, iIntErr := strconv.Atoi(iParts[idx]) - jInt, jIntErr := strconv.Atoi(jParts[idx]) - - switch { - case iIntErr == nil && jIntErr == nil: - // sort numerically if both parts are integers - return iInt < jInt - case iIntErr == nil: - // numbers sort before strings - return true - case jIntErr == nil: - return false - default: - return iParts[idx] < jParts[idx] - } - } - - return r[i] < r[j] -} - -// moduleStateSort implements sort.Interface to sort module states -type moduleStateSort []*ModuleState - -func (s moduleStateSort) Len() int { - return len(s) -} - -func (s moduleStateSort) Less(i, j int) bool { - a := s[i] - b := s[j] - - // If either is nil, then the nil one is "less" than - if a == nil || b == nil { - return a == nil - } - - // If the lengths are different, then the shorter one always wins - if len(a.Path) != len(b.Path) { - return len(a.Path) < len(b.Path) - } - - // Otherwise, compare lexically - return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") -} - -func (s moduleStateSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -const stateValidateErrMultiModule = ` -Multiple modules with the same path: %s - -This means that there are multiple entries in the "modules" field -in your state file that point to the same module. This will cause Terraform -to behave in unexpected and error prone ways and is invalid. Please back up -and modify your state file manually to resolve this. 
-` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go deleted file mode 100644 index aa13cce8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go +++ /dev/null @@ -1,189 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/mitchellh/copystructure" -) - -// upgradeStateV1ToV2 is used to upgrade a V1 state representation -// into a V2 state representation -func upgradeStateV1ToV2(old *stateV1) (*State, error) { - if old == nil { - return nil, nil - } - - remote, err := old.Remote.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - - modules := make([]*ModuleState, len(old.Modules)) - for i, module := range old.Modules { - upgraded, err := module.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading State V1: %v", err) - } - modules[i] = upgraded - } - if len(modules) == 0 { - modules = nil - } - - newState := &State{ - Version: 2, - Serial: old.Serial, - Remote: remote, - Modules: modules, - } - - newState.sort() - newState.init() - - return newState, nil -} - -func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { - if old == nil { - return nil, nil - } - - config, err := copystructure.Copy(old.Config) - if err != nil { - return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) - } - - return &RemoteState{ - Type: old.Type, - Config: config.(map[string]string), - }, nil -} - -func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { - if old == nil { - return nil, nil - } - - pathRaw, err := copystructure.Copy(old.Path) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - path, ok := pathRaw.([]string) - if !ok { - return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") - } - if len(path) == 0 { - // We found some V1 states with a nil path. Assume root and catch - // duplicate path errors later (as part of Validate). 
- path = rootModulePath - } - - // Outputs needs upgrading to use the new structure - outputs := make(map[string]*OutputState) - for key, output := range old.Outputs { - outputs[key] = &OutputState{ - Type: "string", - Value: output, - Sensitive: false, - } - } - - resources := make(map[string]*ResourceState) - for key, oldResource := range old.Resources { - upgraded, err := oldResource.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - resources[key] = upgraded - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) - } - - return &ModuleState{ - Path: path, - Outputs: outputs, - Resources: resources, - Dependencies: dependencies.([]string), - }, nil -} - -func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { - if old == nil { - return nil, nil - } - - dependencies, err := copystructure.Copy(old.Dependencies) - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - primary, err := old.Primary.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - - deposed := make([]*InstanceState, len(old.Deposed)) - for i, v := range old.Deposed { - upgraded, err := v.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) - } - deposed[i] = upgraded - } - if len(deposed) == 0 { - deposed = nil - } - - return &ResourceState{ - Type: old.Type, - Dependencies: dependencies.([]string), - Primary: primary, - Deposed: deposed, - Provider: old.Provider, - }, nil -} - -func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { - if old == nil { - return nil, nil - } - - attributes, err := copystructure.Copy(old.Attributes) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - ephemeral, err := old.Ephemeral.upgradeToV2() - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - meta, err := copystructure.Copy(old.Meta) - if err != nil { - return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) - } - - newMeta := make(map[string]interface{}) - for k, v := range meta.(map[string]string) { - newMeta[k] = v - } - - return &InstanceState{ - ID: old.ID, - Attributes: attributes.(map[string]string), - Ephemeral: *ephemeral, - Meta: newMeta, - }, nil -} - -func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { - connInfo, err := copystructure.Copy(old.ConnInfo) - if err != nil { - return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) - } - return &EphemeralState{ - ConnInfo: connInfo.(map[string]string), - }, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go deleted file mode 100644 index e52d35fc..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go +++ /dev/null @@ -1,142 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "regexp" - "sort" - "strconv" - "strings" -) - -// The upgrade process from V2 to V3 state does not affect the structure, -// so we do not need to redeclare all of the structs involved - we just -// take a deep copy of the old structure and assert the version number is -// as we expect. 
-func upgradeStateV2ToV3(old *State) (*State, error) { - new := old.DeepCopy() - - // Ensure the copied version is v2 before attempting to upgrade - if new.Version != 2 { - return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + - "a state which is not version 2.") - } - - // Set the new version number - new.Version = 3 - - // Change the counts for things which look like maps to use the % - // syntax. Remove counts for empty collections - they will be added - // back in later. - for _, module := range new.Modules { - for _, resource := range module.Resources { - // Upgrade Primary - if resource.Primary != nil { - upgradeAttributesV2ToV3(resource.Primary) - } - - // Upgrade Deposed - if resource.Deposed != nil { - for _, deposed := range resource.Deposed { - upgradeAttributesV2ToV3(deposed) - } - } - } - } - - return new, nil -} - -func upgradeAttributesV2ToV3(instanceState *InstanceState) error { - collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) - collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) - - // Identify the key prefix of anything which is a collection - var collectionKeyPrefixes []string - for key := range instanceState.Attributes { - if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) - } - } - sort.Strings(collectionKeyPrefixes) - - log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) - - // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not - // run very often. - for _, prefix := range collectionKeyPrefixes { - // First get the actual keys that belong to this prefix - var potentialKeysMatching []string - for key := range instanceState.Attributes { - if strings.HasPrefix(key, prefix) { - potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) - } - } - sort.Strings(potentialKeysMatching) - - var actualKeysMatching []string - for _, key := range potentialKeysMatching { - if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { - actualKeysMatching = append(actualKeysMatching, submatches[0][1]) - } else { - if key != "#" { - actualKeysMatching = append(actualKeysMatching, key) - } - } - } - actualKeysMatching = uniqueSortedStrings(actualKeysMatching) - - // Now inspect the keys in order to determine whether this is most likely to be - // a map, list or set. There is room for error here, so we log in each case. If - // there is no method of telling, we remove the key from the InstanceState in - // order that it will be recreated. Again, this could be rolled into fewer loops - // but we prefer clarity. - - oldCountKey := fmt.Sprintf("%s#", prefix) - - // First, detect "obvious" maps - which have non-numeric keys (mostly). - hasNonNumericKeys := false - for _, key := range actualKeysMatching { - if _, err := strconv.Atoi(key); err != nil { - hasNonNumericKeys = true - } - } - if hasNonNumericKeys { - newCountKey := fmt.Sprintf("%s%%", prefix) - - instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", - strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) - } - - // Now detect empty collections and remove them from state. 
- if len(actualKeysMatching) == 0 { - delete(instanceState.Attributes, oldCountKey) - log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", - strings.TrimSuffix(prefix, ".")) - } - } - - return nil -} - -// uniqueSortedStrings removes duplicates from a slice of strings and returns -// a sorted slice of the unique strings. -func uniqueSortedStrings(input []string) []string { - uniquemap := make(map[string]struct{}) - for _, str := range input { - uniquemap[str] = struct{}{} - } - - output := make([]string, len(uniquemap)) - - i := 0 - for key := range uniquemap { - output[i] = key - i = i + 1 - } - - sort.Strings(output) - return output -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go deleted file mode 100644 index 68cffb41..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go +++ /dev/null @@ -1,145 +0,0 @@ -package terraform - -// stateV1 keeps track of a snapshot state-of-the-world that Terraform -// can use to keep track of what real world resources it is actually -// managing. -// -// stateV1 is _only used for the purposes of backwards compatibility -// and is no longer used in Terraform. -// -// For the upgrade process, see state_upgrade_v1_to_v2.go -type stateV1 struct { - // Version is the protocol version. "1" for a StateV1. - Version int `json:"version"` - - // Serial is incremented on any operation that modifies - // the State file. It is used to detect potentially conflicting - // updates. - Serial int64 `json:"serial"` - - // Remote is used to track the metadata required to - // pull and push state files from a remote storage endpoint. - Remote *remoteStateV1 `json:"remote,omitempty"` - - // Modules contains all the modules in a breadth-first order - Modules []*moduleStateV1 `json:"modules"` -} - -type remoteStateV1 struct { - // Type controls the client we use for the remote state - Type string `json:"type"` - - // Config is used to store arbitrary configuration that - // is type specific - Config map[string]string `json:"config"` -} - -type moduleStateV1 struct { - // Path is the import path from the root module. Modules imports are - // always disjoint, so the path represents amodule tree - Path []string `json:"path"` - - // Outputs declared by the module and maintained for each module - // even though only the root module technically needs to be kept. - // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` - - // Resources is a mapping of the logically named resource to - // the state of the resource. Each resource may actually have - // N instances underneath, although a user only needs to think - // about the 1:1 case. - Resources map[string]*resourceStateV1 `json:"resources"` - - // Dependencies are a list of things that this module relies on - // existing to remain intact. For example: an module may depend - // on a VPC ID given by an aws_vpc resource. - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a module that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. 
- Dependencies []string `json:"depends_on,omitempty"` -} - -type resourceStateV1 struct { - // This is filled in and managed by Terraform, and is the resource - // type itself such as "mycloud_instance". If a resource provider sets - // this value, it won't be persisted. - Type string `json:"type"` - - // Dependencies are a list of things that this resource relies on - // existing to remain intact. For example: an AWS instance might - // depend on a subnet (which itself might depend on a VPC, and so - // on). - // - // Terraform uses this information to build valid destruction - // orders and to warn the user if they're destroying a resource that - // another resource depends on. - // - // Things can be put into this list that may not be managed by - // Terraform. If Terraform doesn't find a matching ID in the - // overall state, then it assumes it isn't managed and doesn't - // worry about it. - Dependencies []string `json:"depends_on,omitempty"` - - // Primary is the current active instance for this resource. - // It can be replaced but only after a successful creation. - // This is the instances on which providers will act. - Primary *instanceStateV1 `json:"primary"` - - // Tainted is used to track any underlying instances that - // have been created but are in a bad or unknown state and - // need to be cleaned up subsequently. In the - // standard case, there is only at most a single instance. - // However, in pathological cases, it is possible for the number - // of instances to accumulate. - Tainted []*instanceStateV1 `json:"tainted,omitempty"` - - // Deposed is used in the mechanics of CreateBeforeDestroy: the existing - // Primary is Deposed to get it out of the way for the replacement Primary to - // be created by Apply. If the replacement Primary creates successfully, the - // Deposed instance is cleaned up. If there were problems creating the - // replacement, the instance remains in the Deposed list so it can be - // destroyed in a future run. Functionally, Deposed instances are very - // similar to Tainted instances in that Terraform is only tracking them in - // order to remember to destroy them. - Deposed []*instanceStateV1 `json:"deposed,omitempty"` - - // Provider is used when a resource is connected to a provider with an alias. - // If this string is empty, the resource is connected to the default provider, - // e.g. "aws_instance" goes with the "aws" provider. - // If the resource block contained a "provider" key, that value will be set here. - Provider string `json:"provider,omitempty"` -} - -type instanceStateV1 struct { - // A unique ID for this resource. This is opaque to Terraform - // and is only meant as a lookup mechanism for the providers. - ID string `json:"id"` - - // Attributes are basic information about the resource. Any keys here - // are accessible in variable format within Terraform configurations: - // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` - - // Ephemeral is used to store any state associated with this instance - // that is necessary for the Terraform run to complete, but is not - // persisted to a state file. - Ephemeral ephemeralStateV1 `json:"-"` - - // Meta is a simple K/V map that is persisted to the State but otherwise - // ignored by Terraform core. It's meant to be used for accounting by - // external client code. 
- Meta map[string]string `json:"meta,omitempty"` -} - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go deleted file mode 100644 index 3f0418d9..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/testing.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "os" - "testing" -) - -// TestStateFile writes the given state to the path. -func TestStateFile(t *testing.T, path string, state *State) { - f, err := os.Create(path) - if err != nil { - t.Fatalf("err: %s", err) - } - defer f.Close() - - if err := WriteState(state, f); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go deleted file mode 100644 index d587c89e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform.go +++ /dev/null @@ -1,63 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/helper/logging" -) - -// GraphTransformer is the interface that transformers implement. This -// interface is only for transforms that need entire graph visibility. -type GraphTransformer interface { - Transform(*Graph) error -} - -// GraphVertexTransformer is an interface that transforms a single -// Vertex within with graph. This is a specialization of GraphTransformer -// that makes it easy to do vertex replacement. -// -// The GraphTransformer that runs through the GraphVertexTransformers is -// VertexTransformer. -type GraphVertexTransformer interface { - Transform(dag.Vertex) (dag.Vertex, error) -} - -// GraphTransformIf is a helper function that conditionally returns a -// GraphTransformer given. This is useful for calling inline a sequence -// of transforms without having to split it up into multiple append() calls. -func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { - if f() { - return then - } - - return nil -} - -type graphTransformerMulti struct { - Transforms []GraphTransformer -} - -func (t *graphTransformerMulti) Transform(g *Graph) error { - var lastStepStr string - for _, t := range t.Transforms { - log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) - if err := t.Transform(g); err != nil { - return err - } - if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s ------", t, logging.Indent(thisStepStr)) - lastStepStr = thisStepStr - } else { - log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) - } - } - - return nil -} - -// GraphTransformMulti combines multiple graph transformers into a single -// GraphTransformer that runs all the individual graph transformers. 
-func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { - return &graphTransformerMulti{Transforms: ts} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go deleted file mode 100644 index 897a7e79..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// GraphNodeAttachProvider is an interface that must be implemented by nodes -// that want provider configurations attached. -type GraphNodeAttachProvider interface { - // Must be implemented to determine the path for the configuration - GraphNodeSubPath - - // ProviderName with no module prefix. Example: "aws". - ProviderAddr() addrs.AbsProviderConfig - - // Sets the configuration - AttachProvider(*configs.Provider) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go deleted file mode 100644 index 03f8564d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes -// that want resource configurations attached. -type GraphNodeAttachResourceConfig interface { - GraphNodeResource - - // Sets the configuration - AttachResourceConfig(*configs.Resource) -} - -// AttachResourceConfigTransformer goes through the graph and attaches -// resource configuration structures to nodes that implement -// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. -type AttachResourceConfigTransformer struct { - Config *configs.Config // Config is the root node in the config tree -} - -func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { - - // Go through and find GraphNodeAttachResource - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachResource implementations - arn, ok := v.(GraphNodeAttachResourceConfig) - if !ok { - continue - } - - // Determine what we're looking for - addr := arn.ResourceAddr() - - // Get the configuration. 
- config := t.Config.DescendentForInstance(addr.Module) - if config == nil { - log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) - continue - } - - for _, r := range config.Module.ManagedResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - } - for _, r := range config.Module.DataResources { - rAddr := r.Addr() - - if rAddr != addr.Resource { - // Not the same resource - continue - } - - log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) - arn.AttachResourceConfig(r) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go deleted file mode 100644 index 2e4c3551..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go +++ /dev/null @@ -1,99 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeAttachResourceSchema is an interface implemented by node types -// that need a resource schema attached. -type GraphNodeAttachResourceSchema interface { - GraphNodeResource - GraphNodeProviderConsumer - - AttachResourceSchema(schema *configschema.Block, version uint64) -} - -// GraphNodeAttachProviderConfigSchema is an interface implemented by node types -// that need a provider configuration schema attached. -type GraphNodeAttachProviderConfigSchema interface { - GraphNodeProvider - - AttachProviderConfigSchema(*configschema.Block) -} - -// GraphNodeAttachProvisionerSchema is an interface implemented by node types -// that need one or more provisioner schemas attached. -type GraphNodeAttachProvisionerSchema interface { - ProvisionedBy() []string - - // SetProvisionerSchema is called during transform for each provisioner - // type returned from ProvisionedBy, providing the configuration schema - // for each provisioner in turn. The implementer should save these for - // later use in evaluating provisioner configuration blocks. - AttachProvisionerSchema(name string, schema *configschema.Block) -} - -// AttachSchemaTransformer finds nodes that implement -// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or -// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each -// and then passes them to a method implemented by the node. -type AttachSchemaTransformer struct { - Schemas *Schemas -} - -func (t *AttachSchemaTransformer) Transform(g *Graph) error { - if t.Schemas == nil { - // Should never happen with a reasonable caller, but we'll return a - // proper error here anyway so that we'll fail gracefully. 
- return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") - } - - for _, v := range g.Vertices() { - - if tv, ok := v.(GraphNodeAttachResourceSchema); ok { - addr := tv.ResourceAddr() - mode := addr.Resource.Mode - typeName := addr.Resource.Type - providerAddr, _ := tv.ProvidedBy() - providerType := providerAddr.ProviderConfig.Type.LegacyString() - - schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) - tv.AttachResourceSchema(schema, version) - } - - if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { - providerAddr := tv.ProviderAddr() - schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type.LegacyString()) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) - tv.AttachProviderConfigSchema(schema) - } - - if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { - names := tv.ProvisionedBy() - for _, name := range names { - schema := t.Schemas.ProvisionerConfig(name) - if schema == nil { - log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) - continue - } - log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) - tv.AttachProvisionerSchema(name, schema) - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go deleted file mode 100644 index 3af7b989..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go +++ /dev/null @@ -1,68 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// GraphNodeAttachResourceState is an interface that can be implemented -// to request that a ResourceState is attached to the node. -// -// Due to a historical naming inconsistency, the type ResourceState actually -// represents the state for a particular _instance_, while InstanceState -// represents the values for that instance during a particular phase -// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState -// is supported only for nodes that represent resource instances, even though -// the name might suggest it is for containing resources. -type GraphNodeAttachResourceState interface { - GraphNodeResourceInstance - - // Sets the state - AttachResourceState(*states.Resource) -} - -// AttachStateTransformer goes through the graph and attaches -// state to nodes that implement the interfaces above. -type AttachStateTransformer struct { - State *states.State // State is the root state -} - -func (t *AttachStateTransformer) Transform(g *Graph) error { - // If no state, then nothing to do - if t.State == nil { - log.Printf("[DEBUG] Not attaching any node states: overall state is nil") - return nil - } - - for _, v := range g.Vertices() { - // Nodes implement this interface to request state attachment. 
- an, ok := v.(GraphNodeAttachResourceState) - if !ok { - continue - } - addr := an.ResourceInstanceAddr() - - rs := t.State.Resource(addr.ContainingResource()) - if rs == nil { - log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - is := rs.Instance(addr.Resource.Key) - if is == nil { - // We don't actually need this here, since we'll attach the whole - // resource state, but we still check because it'd be weird - // for the specific instance we're attaching to not to exist. - log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) - continue - } - - // make sure to attach a copy of the state, so instances can modify the - // same ResourceState. - an.AttachResourceState(rs.DeepCopy()) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go deleted file mode 100644 index 9d3b6f4b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go +++ /dev/null @@ -1,133 +0,0 @@ -package terraform - -import ( - "log" - "sync" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// ConfigTransformer is a GraphTransformer that adds all the resources -// from the configuration to the graph. -// -// The module used to configure this transformer must be the root module. -// -// Only resources are added to the graph. Variables, outputs, and -// providers must be added via other transforms. -// -// Unlike ConfigTransformerOld, this transformer creates a graph with -// all resources including module resources, rather than creating module -// nodes that are then "flattened". -type ConfigTransformer struct { - Concrete ConcreteResourceNodeFunc - - // Module is the module to add resources from. - Config *configs.Config - - // Unique will only add resources that aren't already present in the graph. - Unique bool - - // Mode will only add resources that match the given mode - ModeFilter bool - Mode addrs.ResourceMode - - l sync.Mutex - uniqueMap map[string]struct{} -} - -func (t *ConfigTransformer) Transform(g *Graph) error { - // Lock since we use some internal state - t.l.Lock() - defer t.l.Unlock() - - // If no configuration is available, we don't do anything - if t.Config == nil { - return nil - } - - // Reset the uniqueness map. If we're tracking uniques, then populate - // it with addresses. - t.uniqueMap = make(map[string]struct{}) - defer func() { t.uniqueMap = nil }() - if t.Unique { - for _, v := range g.Vertices() { - if rn, ok := v.(GraphNodeResource); ok { - t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} - } - } - } - - // Start the transformation process - return t.transform(g, t.Config) -} - -func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { - // If no config, do nothing - if config == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, config); err != nil { - return err - } - - // Transform all the children. 
- for _, c := range config.Children { - if err := t.transform(g, c); err != nil { - return err - } - } - - return nil -} - -func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { - path := config.Path - module := config.Module - log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) - - // For now we assume that each module call produces only one module - // instance with no key, since we don't yet support "count" and "for_each" - // on modules. - // FIXME: As part of supporting "count" and "for_each" on modules, rework - // this so that we'll "expand" the module call first and then create graph - // nodes for each module instance separately. - instPath := path.UnkeyedInstanceShim() - - allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) - for _, r := range module.ManagedResources { - allResources = append(allResources, r) - } - for _, r := range module.DataResources { - allResources = append(allResources, r) - } - - for _, r := range allResources { - relAddr := r.Addr() - - if t.ModeFilter && relAddr.Mode != t.Mode { - // Skip non-matching modes - continue - } - - addr := relAddr.Absolute(instPath) - if _, ok := t.uniqueMap[addr.String()]; ok { - // We've already seen a resource with this address. This should - // never happen, because we enforce uniqueness in the config loader. - continue - } - - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go deleted file mode 100644 index 01601bdd..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// CountBoundaryTransformer adds a node that depends on everything else -// so that it runs last in order to clean up the state for nodes that -// are on the "count boundary": "foo.0" when only one exists becomes "foo" -type CountBoundaryTransformer struct { - Config *configs.Config -} - -func (t *CountBoundaryTransformer) Transform(g *Graph) error { - node := &NodeCountBoundary{ - Config: t.Config, - } - g.Add(node) - - // Depends on everything - for _, v := range g.Vertices() { - // Don't connect to ourselves - if v == node { - continue - } - - // Connect! - g.Connect(dag.BasicEdge(node, v)) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go deleted file mode 100644 index 410a709e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go +++ /dev/null @@ -1,319 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// GraphNodeDestroyerCBD must be implemented by nodes that might be -// create-before-destroy destroyers, or might plan a create-before-destroy -// action. -type GraphNodeDestroyerCBD interface { - // CreateBeforeDestroy returns true if this node represents a node - // that is doing a CBD. 
- CreateBeforeDestroy() bool - - // ModifyCreateBeforeDestroy is called when the CBD state of a node - // is changed dynamically. This can return an error if this isn't - // allowed. - ModifyCreateBeforeDestroy(bool) error -} - -// GraphNodeAttachDestroyer is implemented by applyable nodes that have a -// companion destroy node. This allows the creation node to look up the status -// of the destroy node and determine if it needs to depose the existing state, -// or replace it. -// If a node is not marked as create-before-destroy in the configuration, but a -// dependency forces that status, only the destroy node will be aware of that -// status. -type GraphNodeAttachDestroyer interface { - // AttachDestroyNode takes a destroy node and saves a reference to that - // node in the receiver, so it can later check the status of - // CreateBeforeDestroy(). - AttachDestroyNode(n GraphNodeDestroyerCBD) -} - -// ForcedCBDTransformer detects when a particular CBD-able graph node has -// dependencies with another that has create_before_destroy set that require -// it to be forced on, and forces it on. -// -// This must be used in the plan graph builder to ensure that -// create_before_destroy settings are properly propagated before constructing -// the planned changes. This requires that the plannable resource nodes -// implement GraphNodeDestroyerCBD. -type ForcedCBDTransformer struct { -} - -func (t *ForcedCBDTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - // If there are no CBD decendent (dependent nodes), then we - // do nothing here. - if !t.hasCBDDescendent(g, v) { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) - continue - } - - // If this isn't naturally a CBD node, this means that an descendent is - // and we need to auto-upgrade this node to CBD. We do this because - // a CBD node depending on non-CBD will result in cycles. To avoid this, - // we always attempt to upgrade it. - log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) - if err := dn.ModifyCreateBeforeDestroy(true); err != nil { - return fmt.Errorf( - "%s: must have create before destroy enabled because "+ - "a dependent resource has CBD enabled. However, when "+ - "attempting to automatically do this, an error occurred: %s", - dag.VertexName(v), err) - } - } else { - log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) - } - } - return nil -} - -// hasCBDDescendent returns true if any descendent (node that depends on this) -// has CBD set. -func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { - s, _ := g.Descendents(v) - if s == nil { - return true - } - - for _, ov := range s.List() { - dn, ok := ov.(GraphNodeDestroyerCBD) - if !ok { - continue - } - - if dn.CreateBeforeDestroy() { - // some descendent is CreateBeforeDestroy, so we need to follow suit - log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) - return true - } - } - - return false -} - -// CBDEdgeTransformer modifies the edges of CBD nodes that went through -// the DestroyEdgeTransformer to have the right dependencies. There are -// two real tasks here: -// -// 1. With CBD, the destroy edge is inverted: the destroy depends on -// the creation. -// -// 2. 
A_d must depend on resources that depend on A. This is to enable -// the destroy to only happen once nodes that depend on A successfully -// update to A. Example: adding a web server updates the load balancer -// before deleting the old web server. -// -// This transformer requires that a previous transformer has already forced -// create_before_destroy on for nodes that are depended on by explicit CBD -// nodes. This is the logic in ForcedCBDTransformer, though in practice we -// will get here by recording the CBD-ness of each change in the plan during -// the plan walk and then forcing the nodes into the appropriate setting during -// DiffTransformer when building the apply graph. -type CBDEdgeTransformer struct { - // Module and State are only needed to look up dependencies in - // any way possible. Either can be nil if not availabile. - Config *configs.Config - State *states.State - - // If configuration is present then Schemas is required in order to - // obtain schema information from providers and provisioners so we can - // properly resolve implicit dependencies. - Schemas *Schemas - - // If the operation is a simple destroy, no transformation is done. - Destroy bool -} - -func (t *CBDEdgeTransformer) Transform(g *Graph) error { - if t.Destroy { - return nil - } - - // Go through and reverse any destroy edges - destroyMap := make(map[string][]dag.Vertex) - for _, v := range g.Vertices() { - dn, ok := v.(GraphNodeDestroyerCBD) - if !ok { - continue - } - dern, ok := v.(GraphNodeDestroyer) - if !ok { - continue - } - - if !dn.CreateBeforeDestroy() { - continue - } - - // Find the resource edges - for _, e := range g.EdgesTo(v) { - switch de := e.(type) { - case *DestroyEdge: - // we need to invert the destroy edge from the create node - log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s", - dag.VertexName(de.Source()), dag.VertexName(de.Target())) - - // Found it! Invert. - g.RemoveEdge(de) - applyNode := de.Source() - destroyNode := de.Target() - g.Connect(&DestroyEdge{S: destroyNode, T: applyNode}) - default: - // We cannot have any direct dependencies from creators when - // the node is CBD without inducing a cycle. - if _, ok := e.Source().(GraphNodeCreator); ok { - log.Printf("[TRACE] CBDEdgeTransformer: removing non DestroyEdge to CBD destroy node: %s => %s", dag.VertexName(e.Source()), dag.VertexName(e.Target())) - g.RemoveEdge(e) - } - } - } - - // If the address has an index, we strip that. Our depMap creation - // graph doesn't expand counts so we don't currently get _exact_ - // dependencies. One day when we limit dependencies more exactly - // this will have to change. We have a test case covering this - // (depNonCBDCountBoth) so it'll be caught. - addr := dern.DestroyAddr() - key := addr.ContainingResource().String() - - // Add this to the list of nodes that we need to fix up - // the edges for (step 2 above in the docs). - destroyMap[key] = append(destroyMap[key], v) - } - - // If we have no CBD nodes, then our work here is done - if len(destroyMap) == 0 { - return nil - } - - // We have CBD nodes. We now have to move on to the much more difficult - // task of connecting dependencies of the creation side of the destroy - // to the destruction node. The easiest way to explain this is an example: - // - // Given a pre-destroy dependence of: A => B - // And A has CBD set. - // - // The resulting graph should be: A => B => A_d - // - // They key here is that B happens before A is destroyed. 
This is to - // facilitate the primary purpose for CBD: making sure that downstreams - // are properly updated to avoid downtime before the resource is destroyed. - depMap, err := t.depMap(g, destroyMap) - if err != nil { - return err - } - - // We now have the mapping of resource addresses to the destroy - // nodes they need to depend on. We now go through our own vertices to - // find any matching these addresses and make the connection. - for _, v := range g.Vertices() { - // We're looking for creators - rn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - // Get the address - addr := rn.CreateAddr() - - // If the address has an index, we strip that. Our depMap creation - // graph doesn't expand counts so we don't currently get _exact_ - // dependencies. One day when we limit dependencies more exactly - // this will have to change. We have a test case covering this - // (depNonCBDCount) so it'll be caught. - key := addr.ContainingResource().String() - - // If there is nothing this resource should depend on, ignore it - dns, ok := depMap[key] - if !ok { - continue - } - - // We have nodes! Make the connection - for _, dn := range dns { - log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s", - dag.VertexName(dn), dag.VertexName(v)) - g.Connect(dag.BasicEdge(dn, v)) - } - } - - return nil -} - -func (t *CBDEdgeTransformer) depMap(g *Graph, destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { - // Build the list of destroy nodes that each resource address should depend - // on. For example, when we find B, we map the address of B to A_d in the - // "depMap" variable below. - - // Use a nested map to remove duplicate edges. - depMap := make(map[string]map[dag.Vertex]struct{}) - for _, v := range g.Vertices() { - // We're looking for resources. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - // Get the address - addr := rn.ResourceAddr() - key := addr.String() - - // Get the destroy nodes that are destroying this resource. - // If there aren't any, then we don't need to worry about - // any connections. - dns, ok := destroyMap[key] - if !ok { - continue - } - - // Get the nodes that depend on this on. In the example above: - // finding B in A => B. Since dependencies can span modules, walk all - // descendents of the resource. - des, _ := g.Descendents(v) - for _, v := range des.List() { - // We're looking for resources. - rn, ok := v.(GraphNodeResource) - if !ok { - continue - } - - // Keep track of the destroy nodes that this address - // needs to depend on. - key := rn.ResourceAddr().String() - - deps, ok := depMap[key] - if !ok { - deps = make(map[dag.Vertex]struct{}) - } - - for _, d := range dns { - deps[d] = struct{}{} - } - depMap[key] = deps - } - } - - result := map[string][]dag.Vertex{} - for k, m := range depMap { - for v := range m { - result[k] = append(result[k], v) - } - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go deleted file mode 100644 index f5242922..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go +++ /dev/null @@ -1,364 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeDestroyer must be implemented by nodes that destroy resources. 
-type GraphNodeDestroyer interface { - dag.Vertex - - // DestroyAddr is the address of the resource that is being - // destroyed by this node. If this returns nil, then this node - // is not destroying anything. - DestroyAddr() *addrs.AbsResourceInstance -} - -// GraphNodeCreator must be implemented by nodes that create OR update resources. -type GraphNodeCreator interface { - // CreateAddr is the address of the resource being created or updated - CreateAddr() *addrs.AbsResourceInstance -} - -// DestroyEdgeTransformer is a GraphTransformer that creates the proper -// references for destroy resources. Destroy resources are more complex -// in that they must be depend on the destruction of resources that -// in turn depend on the CREATION of the node being destroy. -// -// That is complicated. Visually: -// -// B_d -> A_d -> A -> B -// -// Notice that A destroy depends on B destroy, while B create depends on -// A create. They're inverted. This must be done for example because often -// dependent resources will block parent resources from deleting. Concrete -// example: VPC with subnets, the VPC can't be deleted while there are -// still subnets. -type DestroyEdgeTransformer struct { - // These are needed to properly build the graph of dependencies - // to determine what a destroy node depends on. Any of these can be nil. - Config *configs.Config - State *states.State - - // If configuration is present then Schemas is required in order to - // obtain schema information from providers and provisioners in order - // to properly resolve implicit dependencies. - Schemas *Schemas -} - -func (t *DestroyEdgeTransformer) Transform(g *Graph) error { - // Build a map of what is being destroyed (by address string) to - // the list of destroyers. - destroyers := make(map[string][]GraphNodeDestroyer) - destroyerAddrs := make(map[string]addrs.AbsResourceInstance) - - // Record the creators, which will need to depend on the destroyers if they - // are only being updated. - creators := make(map[string]GraphNodeCreator) - - // destroyersByResource records each destroyer by the AbsResourceAddress. - // We use this because dependencies are only referenced as resources, but we - // will want to connect all the individual instances for correct ordering. - destroyersByResource := make(map[string][]GraphNodeDestroyer) - for _, v := range g.Vertices() { - switch n := v.(type) { - case GraphNodeDestroyer: - addrP := n.DestroyAddr() - if addrP == nil { - log.Printf("[WARN] DestroyEdgeTransformer: %q (%T) has no destroy address", dag.VertexName(n), v) - continue - } - addr := *addrP - - key := addr.String() - log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(n), v, key) - destroyers[key] = append(destroyers[key], n) - destroyerAddrs[key] = addr - - resAddr := addr.Resource.Resource.Absolute(addr.Module).String() - destroyersByResource[resAddr] = append(destroyersByResource[resAddr], n) - case GraphNodeCreator: - addr := n.CreateAddr() - creators[addr.String()] = n - } - } - - // If we aren't destroying anything, there will be no edges to make - // so just exit early and avoid future work. 
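The inversion described in the comment above (create order A then B, destroy order B then A) can be illustrated with a tiny dependency map; this is an assumed, simplified model, not the vendored transformer:

```go
package main

import "fmt"

func main() {
	// "B depends on A" for creation: A must exist before B.
	createDeps := map[string][]string{"B": {"A"}}

	// Destroy dependencies run the other way: destroying A must wait for
	// the destroy of B, mirroring "A destroy depends on B destroy".
	destroyDeps := map[string][]string{}
	for child, parents := range createDeps {
		for _, p := range parents {
			destroyDeps[p+"_d"] = append(destroyDeps[p+"_d"], child+"_d")
		}
	}
	fmt.Println(destroyDeps) // map[A_d:[B_d]]
}
```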
- if len(destroyers) == 0 { - return nil - } - - // Connect destroy despendencies as stored in the state - for _, ds := range destroyers { - for _, des := range ds { - ri, ok := des.(GraphNodeResourceInstance) - if !ok { - continue - } - - for _, resAddr := range ri.StateDependencies() { - for _, desDep := range destroyersByResource[resAddr.String()] { - log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(desDep), dag.VertexName(des)) - g.Connect(dag.BasicEdge(desDep, des)) - - } - } - } - } - - // connect creators to any destroyers on which they may depend - for _, c := range creators { - ri, ok := c.(GraphNodeResourceInstance) - if !ok { - continue - } - - for _, resAddr := range ri.StateDependencies() { - for _, desDep := range destroyersByResource[resAddr.String()] { - log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(c), dag.VertexName(desDep)) - g.Connect(dag.BasicEdge(c, desDep)) - - } - } - } - - // Go through and connect creators to destroyers. Going along with - // our example, this makes: A_d => A - for _, v := range g.Vertices() { - cn, ok := v.(GraphNodeCreator) - if !ok { - continue - } - - addr := cn.CreateAddr() - if addr == nil { - continue - } - - for _, d := range destroyers[addr.String()] { - // For illustrating our example - a_d := d.(dag.Vertex) - a := v - - log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", - dag.VertexName(a), dag.VertexName(a_d)) - - g.Connect(&DestroyEdge{S: a, T: a_d}) - - // Attach the destroy node to the creator - // There really shouldn't be more than one destroyer, but even if - // there are, any of them will represent the correct - // CreateBeforeDestroy status. - if n, ok := cn.(GraphNodeAttachDestroyer); ok { - if d, ok := d.(GraphNodeDestroyerCBD); ok { - n.AttachDestroyNode(d) - } - } - } - } - - // This is strange but is the easiest way to get the dependencies - // of a node that is being destroyed. We use another graph to make sure - // the resource is in the graph and ask for references. We have to do this - // because the node that is being destroyed may NOT be in the graph. - // - // Example: resource A is force new, then destroy A AND create A are - // in the graph. BUT if resource A is just pure destroy, then only - // destroy A is in the graph, and create A is not. - providerFn := func(a *NodeAbstractProvider) dag.Vertex { - return &NodeApplyableProvider{NodeAbstractProvider: a} - } - steps := []GraphTransformer{ - // Add the local values - &LocalTransformer{Config: t.Config}, - - // Add outputs and metadata - &OutputTransformer{Config: t.Config}, - &AttachResourceConfigTransformer{Config: t.Config}, - &AttachStateTransformer{State: t.State}, - - // Add all the variables. We can depend on resources through - // variables due to module parameters, and we need to properly - // determine that. - &RootVariableTransformer{Config: t.Config}, - &ModuleVariableTransformer{Config: t.Config}, - - TransformProviders(nil, providerFn, t.Config), - - // Must attach schemas before ReferenceTransformer so that we can - // analyze the configuration to find references. - &AttachSchemaTransformer{Schemas: t.Schemas}, - - &ReferenceTransformer{}, - } - - // Go through all the nodes being destroyed and create a graph. - // The resulting graph is only of things being CREATED. 
For example, - // following our example, the resulting graph would be: - // - // A, B (with no edges) - // - var tempG Graph - var tempDestroyed []dag.Vertex - for d := range destroyers { - // d is the string key for the resource being destroyed. We actually - // want the address value, which we stashed earlier. - addr := destroyerAddrs[d] - - // This part is a little bit weird but is the best way to - // find the dependencies we need to: build a graph and use the - // attach config and state transformers then ask for references. - abstract := NewNodeAbstractResourceInstance(addr) - tempG.Add(abstract) - tempDestroyed = append(tempDestroyed, abstract) - - // We also add the destroy version here since the destroy can - // depend on things that the creation doesn't (destroy provisioners). - destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} - tempG.Add(destroy) - tempDestroyed = append(tempDestroyed, destroy) - } - - // Run the graph transforms so we have the information we need to - // build references. - log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) - for _, s := range steps { - log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) - if err := s.Transform(&tempG); err != nil { - log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) - return err - } - } - log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) - - // Go through all the nodes in the graph and determine what they - // depend on. - for _, v := range tempDestroyed { - // Find all ancestors of this to determine the edges we'll depend on - vs, err := tempG.Ancestors(v) - if err != nil { - return err - } - - refs := make([]dag.Vertex, 0, vs.Len()) - for _, raw := range vs.List() { - refs = append(refs, raw.(dag.Vertex)) - } - - refNames := make([]string, len(refs)) - for i, ref := range refs { - refNames[i] = dag.VertexName(ref) - } - log.Printf( - "[TRACE] DestroyEdgeTransformer: creation node %q references %s", - dag.VertexName(v), refNames) - - // If we have no references, then we won't need to do anything - if len(refs) == 0 { - continue - } - - // Get the destroy node for this. In the example of our struct, - // we are currently at B and we're looking for B_d. - rn, ok := v.(GraphNodeResourceInstance) - if !ok { - log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) - continue - } - - addr := rn.ResourceInstanceAddr() - dns := destroyers[addr.String()] - - // We have dependencies, check if any are being destroyed - // to build the list of things that we must depend on! - // - // In the example of the struct, if we have: - // - // B_d => A_d => A => B - // - // Then at this point in the algorithm we started with B_d, - // we built B (to get dependencies), and we found A. We're now looking - // to see if A_d exists. - var depDestroyers []dag.Vertex - for _, v := range refs { - rn, ok := v.(GraphNodeResourceInstance) - if !ok { - continue - } - - addr := rn.ResourceInstanceAddr() - key := addr.String() - if ds, ok := destroyers[key]; ok { - for _, d := range ds { - depDestroyers = append(depDestroyers, d.(dag.Vertex)) - log.Printf( - "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", - key, dag.VertexName(d)) - } - } - } - - // Go through and make the connections. Use the variable - // names "a_d" and "b_d" to reference our example. 
- for _, a_d := range dns { - for _, b_d := range depDestroyers { - if b_d != a_d { - log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) - g.Connect(dag.BasicEdge(b_d, a_d)) - } - } - } - } - - return t.pruneResources(g) -} - -// If there are only destroy instances for a particular resource, there's no -// reason for the resource node to prepare the state. Remove Resource nodes so -// that they don't fail by trying to evaluate a resource that is only being -// destroyed along with its dependencies. -func (t *DestroyEdgeTransformer) pruneResources(g *Graph) error { - for _, v := range g.Vertices() { - n, ok := v.(*NodeApplyableResource) - if !ok { - continue - } - - // if there are only destroy dependencies, we don't need this node - des, err := g.Descendents(n) - if err != nil { - return err - } - - descendents := des.List() - nonDestroyInstanceFound := false - for _, v := range descendents { - if _, ok := v.(*NodeApplyableResourceInstance); ok { - nonDestroyInstanceFound = true - break - } - } - - if nonDestroyInstanceFound { - continue - } - - // connect all the through-edges, then delete the node - for _, d := range g.DownEdges(n).List() { - for _, u := range g.UpEdges(n).List() { - g.Connect(dag.BasicEdge(u, d)) - } - } - log.Printf("DestroyEdgeTransformer: pruning unused resource node %s", dag.VertexName(n)) - g.Remove(n) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go deleted file mode 100644 index 23b6e2a7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go +++ /dev/null @@ -1,184 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// DiffTransformer is a GraphTransformer that adds graph nodes representing -// each of the resource changes described in the given Changes object. -type DiffTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - State *states.State - Changes *plans.Changes -} - -func (t *DiffTransformer) Transform(g *Graph) error { - if t.Changes == nil || len(t.Changes.Resources) == 0 { - // Nothing to do! - return nil - } - - // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer starting") - - var diags tfdiags.Diagnostics - state := t.State - changes := t.Changes - - // DiffTransformer creates resource _instance_ nodes. If there are any - // whole-resource nodes already in the graph, we must ensure that they - // get evaluated before any of the corresponding instances by creating - // dependency edges, so we'll do some prep work here to ensure we'll only - // create connections to nodes that existed before we started here. - resourceNodes := map[string][]GraphNodeResource{} - for _, node := range g.Vertices() { - rn, ok := node.(GraphNodeResource) - if !ok { - continue - } - // We ignore any instances that _also_ implement - // GraphNodeResourceInstance, since in the unlikely event that they - // do exist we'd probably end up creating cycles by connecting them. 
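The prep step described above indexes pre-existing whole-resource nodes by address so that instance nodes created for planned changes can be wired to them afterwards. A small sketch of that indexing, with invented node types and addresses:

```go
package main

import "fmt"

type resourceNode struct{ addr string }
type instanceNode struct{ addr string }

func main() {
	vertices := []interface{}{
		resourceNode{"example.a"},
		instanceNode{"example.a[0]"},
	}

	// Index only whole-resource nodes; instance nodes are connected later.
	byAddr := map[string][]resourceNode{}
	for _, v := range vertices {
		if r, ok := v.(resourceNode); ok {
			byAddr[r.addr] = append(byAddr[r.addr], r)
		}
	}
	fmt.Println(len(byAddr["example.a"])) // 1
}
```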
- if _, ok := node.(GraphNodeResourceInstance); ok { - continue - } - - addr := rn.ResourceAddr().String() - resourceNodes[addr] = append(resourceNodes[addr], rn) - } - - for _, rc := range changes.Resources { - addr := rc.Addr - dk := rc.DeposedKey - - log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) - - // Depending on the action we'll need some different combinations of - // nodes, because destroying uses a special node type separate from - // other actions. - var update, delete, createBeforeDestroy bool - switch rc.Action { - case plans.NoOp: - continue - case plans.Delete: - delete = true - case plans.DeleteThenCreate, plans.CreateThenDelete: - update = true - delete = true - createBeforeDestroy = (rc.Action == plans.CreateThenDelete) - default: - update = true - } - - if dk != states.NotDeposed && update { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change for deposed object", - fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), - )) - continue - } - - // If we're going to do a create_before_destroy Replace operation then - // we need to allocate a DeposedKey to use to retain the - // not-yet-destroyed prior object, so that the delete node can destroy - // _that_ rather than the newly-created node, which will be current - // by the time the delete node is visited. - if update && delete && createBeforeDestroy { - // In this case, variable dk will be the _pre-assigned_ DeposedKey - // that must be used if the update graph node deposes the current - // instance, which will then align with the same key we pass - // into the destroy node to ensure we destroy exactly the deposed - // object we expect. - if state != nil { - ris := state.ResourceInstance(addr) - if ris == nil { - // Should never happen, since we don't plan to replace an - // instance that doesn't exist yet. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid planned change", - fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), - )) - continue - } - - // Allocating a deposed key separately from using it can be racy - // in general, but we assume here that nothing except the apply - // node we instantiate below will actually make new deposed objects - // in practice, and so the set of already-used keys will not change - // between now and then. - dk = ris.FindUnusedDeposedKey() - } else { - // If we have no state at all yet then we can use _any_ - // DeposedKey. - dk = states.NewDeposedKey() - } - } - - if update { - // All actions except destroying the node type chosen by t.Concrete - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - if createBeforeDestroy { - // We'll attach our pre-allocated DeposedKey to the node if - // it supports that. NodeApplyableResourceInstance is the - // specific concrete node type we are looking for here really, - // since that's the only node type that might depose objects. 
- if dn, ok := node.(GraphNodeDeposer); ok { - dn.SetPreallocatedDeposedKey(dk) - } - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) - } else { - log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) - } - - g.Add(node) - rsrcAddr := addr.ContainingResource().String() - for _, rsrcNode := range resourceNodes[rsrcAddr] { - g.Connect(dag.BasicEdge(node, rsrcNode)) - } - } - - if delete { - // Destroying always uses a destroy-specific node type, though - // which one depends on whether we're destroying a current object - // or a deposed object. - var node GraphNodeResourceInstance - abstract := NewNodeAbstractResourceInstance(addr) - if dk == states.NotDeposed { - node = &NodeDestroyResourceInstance{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy) - } else { - node = &NodeDestroyDeposedResourceInstanceObject{ - NodeAbstractResourceInstance: abstract, - DeposedKey: dk, - } - } - if dk == states.NotDeposed { - log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) - } else { - log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) - } - g.Add(node) - } - - } - - log.Printf("[TRACE] DiffTransformer complete") - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go deleted file mode 100644 index 982c098b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeExapndable is an interface that nodes can implement to -// signal that they can be expanded. Expanded nodes turn into -// GraphNodeSubgraph nodes within the graph. -type GraphNodeExpandable interface { - Expand(GraphBuilder) (GraphNodeSubgraph, error) -} - -// GraphNodeDynamicExpandable is an interface that nodes can implement -// to signal that they can be expanded at eval-time (hence dynamic). -// These nodes are given the eval context and are expected to return -// a new subgraph. -type GraphNodeDynamicExpandable interface { - DynamicExpand(EvalContext) (*Graph, error) -} - -// GraphNodeSubgraph is an interface a node can implement if it has -// a larger subgraph that should be walked. -type GraphNodeSubgraph interface { - Subgraph() dag.Grapher -} - -// ExpandTransform is a transformer that does a subgraph expansion -// at graph transform time (vs. at eval time). The benefit of earlier -// subgraph expansion is that errors with the graph build can be detected -// at an earlier stage. -type ExpandTransform struct { - Builder GraphBuilder -} - -func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { - ev, ok := v.(GraphNodeExpandable) - if !ok { - // This isn't an expandable vertex, so just ignore it. - return v, nil - } - - // Expand the subgraph! 
- log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) - return ev.Expand(t.Builder) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go deleted file mode 100644 index c801e5c8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportProviderValidateTransformer is a GraphTransformer that goes through -// the providers in the graph and validates that they only depend on variables. -type ImportProviderValidateTransformer struct{} - -func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { - var diags tfdiags.Diagnostics - - for _, v := range g.Vertices() { - // We only care about providers - pv, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // We only care about providers that reference things - rn, ok := pv.(GraphNodeReferencer) - if !ok { - continue - } - - for _, ref := range rn.References() { - if _, ok := ref.Subject.(addrs.InputVariable); !ok { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid provider dependency for import", - Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), - Subject: ref.SourceRange.ToHCL().Ptr(), - }) - } - } - } - - return diags.Err() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go deleted file mode 100644 index 7bda3121..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ /dev/null @@ -1,239 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/tfdiags" -) - -// ImportStateTransformer is a GraphTransformer that adds nodes to the -// graph to represent the imports we want to do for resources. -type ImportStateTransformer struct { - Targets []*ImportTarget -} - -func (t *ImportStateTransformer) Transform(g *Graph) error { - for _, target := range t.Targets { - // The ProviderAddr may not be supplied for non-aliased providers. - // This will be populated if the targets come from the cli, but tests - // may not specify implied provider addresses. 
- providerAddr := target.ProviderAddr - if providerAddr.ProviderConfig.Type.Type == "" { - providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) - } - - node := &graphNodeImportState{ - Addr: target.Addr, - ID: target.ID, - ProviderAddr: providerAddr, - } - g.Add(node) - } - return nil -} - -type graphNodeImportState struct { - Addr addrs.AbsResourceInstance // Addr is the resource address to import into - ID string // ID is the ID to import as - ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type - ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution - - states []providers.ImportedResource -} - -var ( - _ GraphNodeSubPath = (*graphNodeImportState)(nil) - _ GraphNodeEvalable = (*graphNodeImportState)(nil) - _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) - _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) -) - -func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { - // We assume that n.ProviderAddr has been properly populated here. - // It's the responsibility of the code creating a graphNodeImportState - // to populate this, possibly by calling DefaultProviderConfig() on the - // resource address to infer an implied provider from the resource type - // name. - return n.ProviderAddr, false -} - -// GraphNodeProviderConsumer -func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { - n.ResolvedProvider = addr -} - -// GraphNodeSubPath -func (n *graphNodeImportState) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeImportState) EvalTree() EvalNode { - var provider providers.Interface - - // Reset our states - n.states = nil - - // Return our sequence - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - }, - &EvalImportState{ - Addr: n.Addr.Resource, - Provider: &provider, - ID: n.ID, - Output: &n.states, - }, - }, - } -} - -// GraphNodeDynamicExpandable impl. -// -// We use DynamicExpand as a way to generate the subgraph of refreshes -// and state inserts we need to do for our import state. Since they're new -// resources they don't depend on anything else and refreshes are isolated -// so this is nearly a perfect use case for dynamic expand. -func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { - var diags tfdiags.Diagnostics - - g := &Graph{Path: ctx.Path()} - - // nameCounter is used to de-dup names in the state. - nameCounter := make(map[string]int) - - // Compile the list of addresses that we'll be inserting into the state. - // We do this ahead of time so we can verify that we aren't importing - // something that already exists. 
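The name counter mentioned above de-duplicates imported objects that would otherwise land on the same address by appending a numeric suffix to repeats. A compact sketch of that idea with made-up addresses (not the real import code):

```go
package main

import "fmt"

func main() {
	imported := []string{"example.a", "example.a", "example.a"}

	counter := map[string]int{}
	out := make([]string, 0, len(imported))
	for _, addr := range imported {
		n, seen := counter[addr]
		if seen {
			// Repeated address: bump the counter and suffix the name.
			n++
			out = append(out, fmt.Sprintf("%s-%d", addr, n))
		} else {
			out = append(out, addr)
		}
		counter[addr] = n
	}
	fmt.Println(out) // [example.a example.a-1 example.a-2]
}
```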
- addrs := make([]addrs.AbsResourceInstance, len(n.states)) - for i, state := range n.states { - addr := n.Addr - if t := state.TypeName; t != "" { - addr.Resource.Resource.Type = t - } - - // Determine if we need to suffix the name to de-dup - key := addr.String() - count, ok := nameCounter[key] - if ok { - count++ - addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) - } - nameCounter[key] = count - - // Add it to our list - addrs[i] = addr - } - - // Verify that all the addresses are clear - state := ctx.State() - for _, addr := range addrs { - existing := state.ResourceInstance(addr) - if existing != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Resource already managed by Terraform", - fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), - )) - continue - } - } - if diags.HasErrors() { - // Bail out early, then. - return nil, diags.Err() - } - - // For each of the states, we add a node to handle the refresh/add to state. - // "n.states" is populated by our own EvalTree with the result of - // ImportState. Since DynamicExpand is always called after EvalTree, this - // is safe. - for i, state := range n.states { - g.Add(&graphNodeImportStateSub{ - TargetAddr: addrs[i], - State: state, - ResolvedProvider: n.ResolvedProvider, - }) - } - - // Root transform for a single root - t := &RootTransformer{} - if err := t.Transform(g); err != nil { - return nil, err - } - - // Done! - return g, diags.Err() -} - -// graphNodeImportStateSub is the sub-node of graphNodeImportState -// and is part of the subgraph. This node is responsible for refreshing -// and adding a resource to the state once it is imported. -type graphNodeImportStateSub struct { - TargetAddr addrs.AbsResourceInstance - State providers.ImportedResource - ResolvedProvider addrs.AbsProviderConfig -} - -var ( - _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) - _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) -) - -func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result", n.TargetAddr) -} - -func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { - return n.TargetAddr.Module -} - -// GraphNodeEvalable impl. 
-func (n *graphNodeImportStateSub) EvalTree() EvalNode { - // If the Ephemeral type isn't set, then it is an error - if n.State.TypeName == "" { - err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) - return &EvalReturnError{Error: &err} - } - - state := n.State.AsInstanceObject() - - var provider providers.Interface - var providerSchema *ProviderSchema - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Addr: n.ResolvedProvider, - Output: &provider, - Schema: &providerSchema, - }, - &EvalRefresh{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - }, - &EvalImportStateVerify{ - Addr: n.TargetAddr.Resource, - State: &state, - }, - &EvalWriteState{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go deleted file mode 100644 index 84eb26b2..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go +++ /dev/null @@ -1,48 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/configs" -) - -// LocalTransformer is a GraphTransformer that adds all the local values -// from the configuration to the graph. -type LocalTransformer struct { - Config *configs.Config -} - -func (t *LocalTransformer) Transform(g *Graph) error { - return t.transformModule(g, t.Config) -} - -func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { - if c == nil { - // Can't have any locals if there's no config - return nil - } - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - - for _, local := range c.Module.Locals { - addr := path.LocalValue(local.Name) - node := &NodeLocal{ - Addr: addr, - Config: local, - } - g.Add(node) - } - - // Also populate locals for child modules - for _, cc := range c.Children { - if err := t.transformModule(g, cc); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go deleted file mode 100644 index 18e0b2d1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go +++ /dev/null @@ -1,126 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/configs" -) - -// ModuleVariableTransformer is a GraphTransformer that adds all the variables -// in the configuration to the graph. -// -// Any "variable" block present in any non-root module is included here, even -// if a particular variable is not referenced from anywhere. 
-// -// The transform will produce errors if a call to a module does not conform -// to the expected set of arguments, but this transformer is not in a good -// position to return errors and so the validate walk should include specific -// steps for validating module blocks, separate from this transform. -type ModuleVariableTransformer struct { - Config *configs.Config -} - -func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Config) -} - -func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { - // We can have no variables if we have no configuration. - if c == nil { - return nil - } - - // Transform all the children first. - for _, cc := range c.Children { - if err := t.transform(g, c, cc); err != nil { - return err - } - } - - // If we're processing anything other than the root module then we'll - // add graph nodes for variables defined inside. (Variables for the root - // module are dealt with in RootVariableTransformer). - // If we have a parent, we can determine if a module variable is being - // used, so we transform this. - if parent != nil { - if err := t.transformSingle(g, parent, c); err != nil { - return err - } - } - - return nil -} - -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - _, call := path.Call() - - // Find the call in the parent module configuration, so we can get the - // expressions given for each input variable at the call site. - callConfig, exists := parent.Module.ModuleCalls[call.Name] - if !exists { - // This should never happen, since it indicates an improperly-constructed - // configuration tree. - panic(fmt.Errorf("no module call block found for %s", path)) - } - - // We need to construct a schema for the expected call arguments based on - // the configured variables in our config, which we can then use to - // decode the content of the call block. - schema := &hcl.BodySchema{} - for _, v := range c.Module.Variables { - schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ - Name: v.Name, - Required: v.Default == cty.NilVal, - }) - } - - content, contentDiags := callConfig.Config.Content(schema) - if contentDiags.HasErrors() { - // Validation code elsewhere should deal with any errors before we - // get in here, but we'll report them out here just in case, to - // avoid crashes. - var diags tfdiags.Diagnostics - diags = diags.Append(contentDiags) - return diags.Err() - } - - for _, v := range c.Module.Variables { - var expr hcl.Expression - if attr := content.Attributes[v.Name]; attr != nil { - expr = attr.Expr - } else { - // No expression provided for this variable, so we'll make a - // synthetic one using the variable's default value. - expr = &hclsyntax.LiteralValueExpr{ - Val: v.Default, - SrcRange: v.DeclRange, // This is not exact, but close enough - } - } - - // For now we treat all module variables as "applyable", even though - // such nodes are valid to use on other walks too. 
We may specialize - // this in future if we find reasons to employ different behaviors - // in different scenarios. - node := &NodeApplyableModuleVariable{ - Addr: path.InputVariable(v.Name), - Config: v, - Expr: expr, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go deleted file mode 100644 index 40163cf9..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go +++ /dev/null @@ -1,175 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" - "github.com/zclconf/go-cty/cty" -) - -// OrphanResourceCountTransformer is a GraphTransformer that adds orphans -// for an expanded count to the graph. The determination of this depends -// on the count argument given. -// -// Orphans are found by comparing the count to what is found in the state. -// This transform assumes that if an element in the state is within the count -// bounds given, that it is not an orphan. -type OrphanResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - Count int // Actual count of the resource, or -1 if count is not set at all - ForEach map[string]cty.Value // The ForEach map on the resource - Addr addrs.AbsResource // Addr of the resource to look for orphans - State *states.State // Full global state -} - -func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { - rs := t.State.Resource(t.Addr) - if rs == nil { - return nil // Resource doesn't exist in state, so nothing to do! - } - - haveKeys := make(map[addrs.InstanceKey]struct{}) - for key := range rs.Instances { - haveKeys[key] = struct{}{} - } - - // if for_each is set, use that transformer - if t.ForEach != nil { - return t.transformForEach(haveKeys, g) - } - if t.Count < 0 { - return t.transformNoCount(haveKeys, g) - } - if t.Count == 0 { - return t.transformZeroCount(haveKeys, g) - } - return t.transformCount(haveKeys, g) -} - -func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // If there is a NoKey node, add this to the graph first, - // so that we can create edges to it in subsequent (StringKey) nodes. 
- // This is because the last item determines the resource mode for the whole resource, - // (see SetResourceInstanceCurrent for more information) and we need to evaluate - // an orphaned (NoKey) resource before the in-memory state is updated - // to deal with a new for_each resource - _, hasNoKeyNode := haveKeys[addrs.NoKey] - var noKeyNode dag.Vertex - if hasNoKeyNode { - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(addrs.NoKey)) - noKeyNode = abstract - if f := t.Concrete; f != nil { - noKeyNode = f(abstract) - } - g.Add(noKeyNode) - } - - for key := range haveKeys { - // If the key is no-key, we have already added it, so skip - if key == addrs.NoKey { - continue - } - - s, _ := key.(addrs.StringKey) - // If the key is present in our current for_each, carry on - if _, ok := t.ForEach[string(s)]; ok { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) - g.Add(node) - - // Add edge to noKeyNode if it exists - if hasNoKeyNode { - g.Connect(dag.BasicEdge(node, noKeyNode)) - } - } - return nil -} - -func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // Due to the logic in Transform, we only get in here if our count is - // at least one. - - _, have0Key := haveKeys[addrs.IntKey(0)] - - for key := range haveKeys { - if key == addrs.NoKey && !have0Key { - // If we have no 0-key then we will accept a no-key instance - // as an alias for it. - continue - } - - i, isInt := key.(addrs.IntKey) - if isInt && int(i) < t.Count { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} - -func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // This case is easy: we need to orphan any keys we have at all. - - for key := range haveKeys { - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} - -func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { - // Negative count indicates that count is not set at all, in which - // case we expect to have a single instance with no key set at all. - // However, we'll also accept an instance with key 0 set as an alias - // for it, in case the user has just deleted the "count" argument and - // so wants to keep the first instance in the set. - - _, haveNoKey := haveKeys[addrs.NoKey] - _, have0Key := haveKeys[addrs.IntKey(0)] - keepKey := addrs.NoKey - if have0Key && !haveNoKey { - // If we don't have a no-key instance then we can use the 0-key instance - // instead. 
- keepKey = addrs.IntKey(0) - } - - for key := range haveKeys { - if key == keepKey { - continue - } - - abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go deleted file mode 100644 index c6754093..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ /dev/null @@ -1,60 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" -) - -// OrphanOutputTransformer finds the outputs that aren't present -// in the given config that are in the state and adds them to the graph -// for deletion. -type OrphanOutputTransformer struct { - Config *configs.Config // Root of config tree - State *states.State // State is the root state -} - -func (t *OrphanOutputTransformer) Transform(g *Graph) error { - if t.State == nil { - log.Printf("[DEBUG] No state, no orphan outputs") - return nil - } - - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - return nil -} - -func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the config for this path, which is nil if the entire module has been - // removed. - var outputs map[string]*configs.Output - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - outputs = c.Module.Outputs - } - - // An output is "orphaned" if it's present in the state but not declared - // in the configuration. - for name := range ms.OutputValues { - if _, exists := outputs[name]; exists { - continue - } - - g.Add(&NodeOutputOrphan{ - Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), - }) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go deleted file mode 100644 index 50df1781..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go +++ /dev/null @@ -1,179 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned -// resource instances to the graph. An "orphan" is an instance that is present -// in the state but belongs to a resource that is no longer present in the -// configuration. -// -// This is not the transformer that deals with "count orphans" (instances that -// are no longer covered by a resource's "count" or "for_each" setting); that's -// handled instead by OrphanResourceCountTransformer. -type OrphanResourceInstanceTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - - // State is the global state. We require the global state to - // properly find module orphans at our path. - State *states.State - - // Config is the root node in the configuration tree. We'll look up - // the appropriate note in this tree using the path in each node. 
- Config *configs.Config -} - -func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. - panic("OrphanResourceInstanceTransformer used without setting Config") - } - - // Go through the modules and for each module transform in order - // to add the orphan. - for _, ms := range t.State.Modules { - if err := t.transform(g, ms); err != nil { - return err - } - } - - return nil -} - -func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { - if ms == nil { - return nil - } - - moduleAddr := ms.Addr - - // Get the configuration for this module. The configuration might be - // nil if the module was removed from the configuration. This is okay, - // this just means that every resource is an orphan. - var m *configs.Module - if c := t.Config.DescendentForInstance(moduleAddr); c != nil { - m = c.Module - } - - // An "orphan" is a resource that is in the state but not the configuration, - // so we'll walk the state resources and try to correlate each of them - // with a configuration block. Each orphan gets a node in the graph whose - // type is decided by t.Concrete. - // - // We don't handle orphans related to changes in the "count" and "for_each" - // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. - for _, rs := range ms.Resources { - if m != nil { - if r := m.ResourceByAddr(rs.Addr); r != nil { - continue - } - } - - for key := range rs.Instances { - addr := rs.Addr.Instance(key).Absolute(moduleAddr) - abstract := NewNodeAbstractResourceInstance(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) - g.Add(node) - } - } - - return nil -} - -// OrphanResourceTransformer is a GraphTransformer that adds orphaned -// resources to the graph. An "orphan" is a resource that is present in -// the state but no longer present in the config. -// -// This is separate to OrphanResourceInstanceTransformer in that it deals with -// whole resources, rather than individual instances of resources. Orphan -// resource nodes are only used during apply to clean up leftover empty -// resource state skeletons, after all of the instances inside have been -// removed. -// -// This transformer will also create edges in the graph to any pre-existing -// node that creates or destroys the entire orphaned resource or any of its -// instances, to ensure that the "orphan-ness" of a resource is always dealt -// with after all other aspects of it. -type OrphanResourceTransformer struct { - Concrete ConcreteResourceNodeFunc - - // State is the global state. - State *states.State - - // Config is the root node in the configuration tree. - Config *configs.Config -} - -func (t *OrphanResourceTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - if t.Config == nil { - // Should never happen: we can't be doing any Terraform operations - // without at least an empty configuration. - panic("OrphanResourceTransformer used without setting Config") - } - - // We'll first collect up the existing nodes for each resource so we can - // create dependency edges for any new nodes we create. 
- deps := map[string][]dag.Vertex{} - for _, v := range g.Vertices() { - switch tv := v.(type) { - case GraphNodeResourceInstance: - k := tv.ResourceInstanceAddr().ContainingResource().String() - deps[k] = append(deps[k], v) - case GraphNodeResource: - k := tv.ResourceAddr().String() - deps[k] = append(deps[k], v) - case GraphNodeDestroyer: - k := tv.DestroyAddr().ContainingResource().String() - deps[k] = append(deps[k], v) - } - } - - for _, ms := range t.State.Modules { - moduleAddr := ms.Addr - - mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed - - for _, rs := range ms.Resources { - if mc != nil { - if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { - // It's in the config, so nothing to do for this one. - continue - } - } - - addr := rs.Addr.Absolute(moduleAddr) - abstract := NewNodeAbstractResource(addr) - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) - g.Add(node) - for _, dn := range deps[addr.String()] { - log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) - g.Connect(dag.BasicEdge(node, dn)) - } - } - } - - return nil - -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go deleted file mode 100644 index ed93cdb8..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ /dev/null @@ -1,95 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" -) - -// OutputTransformer is a GraphTransformer that adds all the outputs -// in the configuration to the graph. -// -// This is done for the apply graph builder even if dependent nodes -// aren't changing since there is no downside: the state will be available -// even if the dependent items aren't changing. -type OutputTransformer struct { - Config *configs.Config -} - -func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Config) -} - -func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { - // If we have no config then there can be no outputs. - if c == nil { - return nil - } - - // Transform all the children. We must do this first because - // we can reference module outputs and they must show up in the - // reference map. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - - // Our addressing system distinguishes between modules and module instances, - // but we're not yet ready to make that distinction here (since we don't - // support "count"/"for_each" on modules) and so we just do a naive - // transform of the module path into a module instance path, assuming that - // no keys are in use. This should be removed when "count" and "for_each" - // are implemented for modules. - path := c.Path.UnkeyedInstanceShim() - - for _, o := range c.Module.Outputs { - addr := path.OutputValue(o.Name) - node := &NodeApplyableOutput{ - Addr: addr, - Config: o, - } - g.Add(node) - } - - return nil -} - -// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete -// outputs during destroy. We need to do this to ensure that no stale outputs -// are ever left in the state. 
-type DestroyOutputTransformer struct { -} - -func (t *DestroyOutputTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - output, ok := v.(*NodeApplyableOutput) - if !ok { - continue - } - - // create the destroy node for this output - node := &NodeDestroyableOutput{ - Addr: output.Addr, - Config: output.Config, - } - - log.Printf("[TRACE] creating %s", node.Name()) - g.Add(node) - - deps, err := g.Descendents(v) - if err != nil { - return err - } - - // the destroy node must depend on the eval node - deps.Add(v) - - for _, d := range deps.List() { - log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) - g.Connect(dag.BasicEdge(node, d)) - } - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go deleted file mode 100644 index 8cfb0b5c..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ /dev/null @@ -1,731 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/tfdiags" -) - -func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { - return GraphTransformMulti( - // Add providers from the config - &ProviderConfigTransformer{ - Config: config, - Providers: providers, - Concrete: concrete, - }, - // Add any remaining missing providers - &MissingProviderTransformer{ - Providers: providers, - Concrete: concrete, - }, - // Connect the providers - &ProviderTransformer{ - Config: config, - }, - // Remove unused providers and proxies - &PruneProviderTransformer{}, - // Connect provider to their parent provider nodes - &ParentProviderTransformer{}, - ) -} - -// GraphNodeProvider is an interface that nodes that can be a provider -// must implement. -// -// ProviderAddr returns the address of the provider configuration this -// satisfies, which is relative to the path returned by method Path(). -// -// Name returns the full name of the provider in the config. -type GraphNodeProvider interface { - GraphNodeSubPath - ProviderAddr() addrs.AbsProviderConfig - Name() string -} - -// GraphNodeCloseProvider is an interface that nodes that can be a close -// provider must implement. The CloseProviderName returned is the name of -// the provider they satisfy. -type GraphNodeCloseProvider interface { - GraphNodeSubPath - CloseProviderAddr() addrs.AbsProviderConfig -} - -// GraphNodeProviderConsumer is an interface that nodes that require -// a provider must implement. ProvidedBy must return the address of the provider -// to use, which will be resolved to a configuration either in the same module -// or in an ancestor module, with the resulting absolute address passed to -// SetProvider. -type GraphNodeProviderConsumer interface { - // ProvidedBy returns the address of the provider configuration the node - // refers to. If the returned "exact" value is true, this address will - // be taken exactly. If "exact" is false, a provider configuration from - // an ancestor module may be selected instead. - ProvidedBy() (addr addrs.AbsProviderConfig, exact bool) - // Set the resolved provider address for this resource. - SetProvider(addrs.AbsProviderConfig) -} - -// ProviderTransformer is a GraphTransformer that maps resources to -// providers within the graph. 
This will error if there are any resources -// that don't map to proper resources. -type ProviderTransformer struct { - Config *configs.Config -} - -func (t *ProviderTransformer) Transform(g *Graph) error { - // We need to find a provider configuration address for each resource - // either directly represented by a node or referenced by a node in - // the graph, and then create graph edges from provider to provider user - // so that the providers will get initialized first. - - var diags tfdiags.Diagnostics - - // To start, we'll collect the _requested_ provider addresses for each - // node, which we'll then resolve (handling provider inheritence, etc) in - // the next step. - // Our "requested" map is from graph vertices to string representations of - // provider config addresses (for deduping) to requests. - type ProviderRequest struct { - Addr addrs.AbsProviderConfig - Exact bool // If true, inheritence from parent modules is not attempted - } - requested := map[dag.Vertex]map[string]ProviderRequest{} - needConfigured := map[string]addrs.AbsProviderConfig{} - for _, v := range g.Vertices() { - - // FIXME: fix the type that implements this, so it's not a - // GraphNodeProviderConsumer. - // check if we want to skip connecting this to a provider - if _, ok := v.(GraphNodeNoProvider); ok { - continue - } - - // Does the vertex _directly_ use a provider? - if pv, ok := v.(GraphNodeProviderConsumer); ok { - requested[v] = make(map[string]ProviderRequest) - - p, exact := pv.ProvidedBy() - if exact { - log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p) - } else { - log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p) - } - - requested[v][p.String()] = ProviderRequest{ - Addr: p, - Exact: exact, - } - - // Direct references need the provider configured as well as initialized - needConfigured[p.String()] = p - } - } - - // Now we'll go through all the requested addresses we just collected and - // figure out which _actual_ config address each belongs to, after resolving - // for provider inheritance and passing. - m := providerVertexMap(g) - for v, reqs := range requested { - for key, req := range reqs { - p := req.Addr - target := m[key] - - _, ok := v.(GraphNodeSubPath) - if !ok && target == nil { - // No target and no path to traverse up from - diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) - continue - } - - if target != nil { - log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) - } - - // if we don't have a provider at this level, walk up the path looking for one, - // unless we were told to be exact. - if target == nil && !req.Exact { - for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { - key := pp.String() - target = m[key] - if target != nil { - log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) - break - } - log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) - } - } - - // If this provider doesn't need to be configured then we can just - // stub it out with an init-only provider node, which will just - // start up the provider and fetch its schema. 
- if _, exists := needConfigured[key]; target == nil && !exists { - stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance) - stub := &NodeEvalableProvider{ - &NodeAbstractProvider{ - Addr: stubAddr, - }, - } - m[stubAddr.String()] = stub - log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) - target = stub - g.Add(target) - } - - if target == nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider configuration not present", - fmt.Sprintf( - "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", - dag.VertexName(v), p, dag.VertexName(v), - ), - )) - break - } - - // see if this in an inherited provider - if p, ok := target.(*graphNodeProxyProvider); ok { - g.Remove(p) - target = p.Target() - key = target.(GraphNodeProvider).ProviderAddr().String() - } - - log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) - if pv, ok := v.(GraphNodeProviderConsumer); ok { - pv.SetProvider(target.ProviderAddr()) - } - g.Connect(dag.BasicEdge(v, target)) - } - } - - return diags.Err() -} - -// CloseProviderTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provider connections that aren't needed anymore. -// A provider connection is not needed anymore once all depended resources -// in the graph are evaluated. -type CloseProviderTransformer struct{} - -func (t *CloseProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - cpm := make(map[string]*graphNodeCloseProvider) - var err error - - for _, v := range pm { - p := v.(GraphNodeProvider) - key := p.ProviderAddr().String() - - // get the close provider of this type if we alread created it - closer := cpm[key] - - if closer == nil { - // create a closer for this provider type - closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} - g.Add(closer) - cpm[key] = closer - } - - // Close node depends on the provider itself - // this is added unconditionally, so it will connect to all instances - // of the provider. Extra edges will be removed by transitive - // reduction. - g.Connect(dag.BasicEdge(closer, p)) - - // connect all the provider's resources to the close node - for _, s := range g.UpEdges(p).List() { - if _, ok := s.(GraphNodeProviderConsumer); ok { - g.Connect(dag.BasicEdge(closer, s)) - } - } - } - - return err -} - -// MissingProviderTransformer is a GraphTransformer that adds to the graph -// a node for each default provider configuration that is referenced by another -// node but not already present in the graph. -// -// These "default" nodes are always added to the root module, regardless of -// where they are requested. This is important because our inheritance -// resolution behavior in ProviderTransformer will then treat these as a -// last-ditch fallback after walking up the tree, rather than preferring them -// as it would if they were placed in the same module as the requester. -// -// This transformer may create extra nodes that are not needed in practice, -// due to overriding provider configurations in child modules. -// PruneProviderTransformer can then remove these once ProviderTransformer -// has resolved all of the inheritence, etc. 
-type MissingProviderTransformer struct { - // Providers is the list of providers we support. - Providers []string - - // Concrete, if set, overrides how the providers are made. - Concrete ConcreteProviderNodeFunc -} - -func (t *MissingProviderTransformer) Transform(g *Graph) error { - // Initialize factory - if t.Concrete == nil { - t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { - return a - } - } - - var err error - m := providerVertexMap(g) - for _, v := range g.Vertices() { - // FIXME: fix the type that implements this, so it's not a - // GraphNodeProviderConsumer. - // check if we want to skip connecting this to a provider - if _, ok := v.(GraphNodeNoProvider); ok { - continue - } - - pv, ok := v.(GraphNodeProviderConsumer) - if !ok { - continue - } - - // For our work here we actually care only about the provider type and - // we plan to place all default providers in the root module, and so - // it's safe for us to rely on ProvidedBy here rather than waiting for - // the later proper resolution of provider inheritance done by - // ProviderTransformer. - p, _ := pv.ProvidedBy() - if p.ProviderConfig.Alias != "" { - // We do not create default aliased configurations. - log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p) - continue - } - - // We're going to create an implicit _default_ configuration for the - // referenced provider type in the _root_ module, ignoring all other - // aspects of the resource's declared provider address. - defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type.LegacyString()) - key := defaultAddr.String() - provider := m[key] - - if provider != nil { - // There's already an explicit default configuration for this - // provider type in the root module, so we have nothing to do. - continue - } - - log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) - - // create the missing top-level provider - provider = t.Concrete(&NodeAbstractProvider{ - Addr: defaultAddr, - }).(GraphNodeProvider) - - g.Add(provider) - m[key] = provider - } - - return err -} - -// ParentProviderTransformer connects provider nodes to their parents. -// -// This works by finding nodes that are both GraphNodeProviders and -// GraphNodeSubPath. It then connects the providers to their parent -// path. The parent provider is always at the root level. -type ParentProviderTransformer struct{} - -func (t *ParentProviderTransformer) Transform(g *Graph) error { - pm := providerVertexMap(g) - for _, v := range g.Vertices() { - // Only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // Also require non-empty path, since otherwise we're in the root - // module and so cannot have a parent. - if len(pn.Path()) <= 1 { - continue - } - - // this provider may be disabled, but we can only get it's name from - // the ProviderName string - addr := pn.ProviderAddr() - parentAddr, ok := addr.Inherited() - if ok { - parent := pm[parentAddr.String()] - if parent != nil { - g.Connect(dag.BasicEdge(v, parent)) - } - } - } - return nil -} - -// PruneProviderTransformer removes any providers that are not actually used by -// anything, and provider proxies. This avoids the provider being initialized -// and configured. This both saves resources but also avoids errors since -// configuration may imply initialization which may require auth. 
-type PruneProviderTransformer struct{} - -func (t *PruneProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - _, ok := v.(GraphNodeProvider) - if !ok { - continue - } - - // ProxyProviders will have up edges, but we're now done with them in the graph - if _, ok := v.(*graphNodeProxyProvider); ok { - log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) - g.Remove(v) - } - - // Remove providers with no dependencies. - if g.UpEdges(v).Len() == 0 { - log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) - g.Remove(v) - } - } - - return nil -} - -func providerVertexMap(g *Graph) map[string]GraphNodeProvider { - m := make(map[string]GraphNodeProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvider); ok { - addr := pv.ProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider { - m := make(map[string]GraphNodeCloseProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvider); ok { - addr := pv.CloseProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - -type graphNodeCloseProvider struct { - Addr addrs.AbsProviderConfig -} - -var ( - _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) -) - -func (n *graphNodeCloseProvider) Name() string { - return n.Addr.String() + " (close)" -} - -// GraphNodeSubPath impl. -func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { - return n.Addr.Module -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.Addr) -} - -// GraphNodeDependable impl. -func (n *graphNodeCloseProvider) DependableName() []string { - return []string{n.Name()} -} - -func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { - return n.Addr -} - -// GraphNodeDotter impl. -func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { - if !opts.Verbose { - return nil - } - return &dag.DotNode{ - Name: name, - Attrs: map[string]string{ - "label": n.Name(), - "shape": "diamond", - }, - } -} - -// RemovableIfNotTargeted -func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { - // We need to add this so that this node will be removed if - // it isn't targeted or a dependency of a target. - return true -} - -// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to -// store the name and value of a provider node for inheritance between modules. -// These nodes are only used to store the data while loading the provider -// configurations, and are removed after all the resources have been connected -// to their providers. -type graphNodeProxyProvider struct { - addr addrs.AbsProviderConfig - target GraphNodeProvider -} - -var ( - _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) -) - -func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { - return n.addr -} - -func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { - return n.addr.Module -} - -func (n *graphNodeProxyProvider) Name() string { - return n.addr.String() + " (proxy)" -} - -// find the concrete provider instance -func (n *graphNodeProxyProvider) Target() GraphNodeProvider { - switch t := n.target.(type) { - case *graphNodeProxyProvider: - return t.Target() - default: - return n.target - } -} - -// ProviderConfigTransformer adds all provider nodes from the configuration and -// attaches the configs. 
-type ProviderConfigTransformer struct { - Providers []string - Concrete ConcreteProviderNodeFunc - - // each provider node is stored here so that the proxy nodes can look up - // their targets by name. - providers map[string]GraphNodeProvider - // record providers that can be overriden with a proxy - proxiable map[string]bool - - // Config is the root node of the configuration tree to add providers from. - Config *configs.Config -} - -func (t *ProviderConfigTransformer) Transform(g *Graph) error { - // If no configuration is given, we don't do anything - if t.Config == nil { - return nil - } - - t.providers = make(map[string]GraphNodeProvider) - t.proxiable = make(map[string]bool) - - // Start the transformation process - if err := t.transform(g, t.Config); err != nil { - return err - } - - // finally attach the configs to the new nodes - return t.attachProviderConfigs(g) -} - -func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { - // If no config, do nothing - if c == nil { - return nil - } - - // Add our resources - if err := t.transformSingle(g, c); err != nil { - return err - } - - // Transform all the children. - for _, cc := range c.Children { - if err := t.transform(g, cc); err != nil { - return err - } - } - return nil -} - -func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { - // Get the module associated with this configuration tree node - mod := c.Module - staticPath := c.Path - - // We actually need a dynamic module path here, but we've not yet updated - // our graph builders enough to support expansion of module calls with - // "count" and "for_each" set, so for now we'll shim this by converting to - // a dynamic path with no keys. At the time of writing this is the only - // possible kind of dynamic path anyway. - path := make(addrs.ModuleInstance, len(staticPath)) - for i, name := range staticPath { - path[i] = addrs.ModuleInstanceStep{ - Name: name, - } - } - - // add all providers from the configuration - for _, p := range mod.ProviderConfigs { - relAddr := p.Addr() - addr := relAddr.Absolute(path) - - abstract := &NodeAbstractProvider{ - Addr: addr, - } - var v dag.Vertex - if t.Concrete != nil { - v = t.Concrete(abstract) - } else { - v = abstract - } - - // Add it to the graph - g.Add(v) - key := addr.String() - t.providers[key] = v.(GraphNodeProvider) - - // A provider configuration is "proxyable" if its configuration is - // entirely empty. This means it's standing in for a provider - // configuration that must be passed in from the parent module. - // We decide this by evaluating the config with an empty schema; - // if this succeeds, then we know there's nothing in the body. - _, diags := p.Config.Content(&hcl.BodySchema{}) - t.proxiable[key] = !diags.HasErrors() - } - - // Now replace the provider nodes with proxy nodes if a provider was being - // passed in, and create implicit proxies if there was no config. Any extra - // proxies will be removed in the prune step. 
- return t.addProxyProviders(g, c) -} - -func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { - path := c.Path - - // can't add proxies at the root - if len(path) == 0 { - return nil - } - - parentPath, callAddr := path.Call() - parent := c.Parent - if parent == nil { - return nil - } - - callName := callAddr.Name - var parentCfg *configs.ModuleCall - for name, mod := range parent.Module.ModuleCalls { - if name == callName { - parentCfg = mod - break - } - } - - // We currently don't support count/for_each for modules and so we must - // shim our path and parentPath into module instances here so that the - // rest of Terraform can behave as if we do. This shimming should be - // removed later as part of implementing count/for_each for modules. - instPath := make(addrs.ModuleInstance, len(path)) - for i, name := range path { - instPath[i] = addrs.ModuleInstanceStep{Name: name} - } - parentInstPath := make(addrs.ModuleInstance, len(parentPath)) - for i, name := range parentPath { - parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} - } - - if parentCfg == nil { - // this can't really happen during normal execution. - return fmt.Errorf("parent module config not found for %s", c.Path.String()) - } - - // Go through all the providers the parent is passing in, and add proxies to - // the parent provider nodes. - for _, pair := range parentCfg.Providers { - fullAddr := pair.InChild.Addr().Absolute(instPath) - fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) - fullName := fullAddr.String() - fullParentName := fullParentAddr.String() - - parentProvider := t.providers[fullParentName] - - if parentProvider == nil { - return fmt.Errorf("missing provider %s", fullParentName) - } - - proxy := &graphNodeProxyProvider{ - addr: fullAddr, - target: parentProvider, - } - - concreteProvider := t.providers[fullName] - - // replace the concrete node with the provider passed in - if concreteProvider != nil && t.proxiable[fullName] { - g.Replace(concreteProvider, proxy) - t.providers[fullName] = proxy - continue - } - - // aliased configurations can't be implicitly passed in - if fullAddr.ProviderConfig.Alias != "" { - continue - } - - // There was no concrete provider, so add this as an implicit provider. - // The extra proxy will be pruned later if it's unused. - g.Add(proxy) - t.providers[fullName] = proxy - } - return nil -} - -func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - addr := apn.ProviderAddr() - - // Get the configuration. 
- mc := t.Config.DescendentForInstance(addr.Module) - if mc == nil { - log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) - continue - } - - // Go through the provider configs to find the matching config - for _, p := range mc.Module.ProviderConfigs { - if p.Name == addr.ProviderConfig.Type.LegacyString() && p.Alias == addr.ProviderConfig.Alias { - log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go deleted file mode 100644 index b3102665..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go +++ /dev/null @@ -1,179 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeProvisioner is an interface that nodes that can be a provisioner -// must implement. The ProvisionerName returned is the name of the provisioner -// they satisfy. -type GraphNodeProvisioner interface { - ProvisionerName() string -} - -// GraphNodeCloseProvisioner is an interface that nodes that can be a close -// provisioner must implement. The CloseProvisionerName returned is the name -// of the provisioner they satisfy. -type GraphNodeCloseProvisioner interface { - CloseProvisionerName() string -} - -// GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the names of the -// provisioners to use. -type GraphNodeProvisionerConsumer interface { - ProvisionedBy() []string -} - -// ProvisionerTransformer is a GraphTransformer that maps resources to -// provisioners within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProvisionerTransformer struct{} - -func (t *ProvisionerTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to provisioners they need - var err error - m := provisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - if m[p] == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: provisioner %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), p, dag.VertexName(m[p])) - g.Connect(dag.BasicEdge(v, m[p])) - } - } - } - - return err -} - -// MissingProvisionerTransformer is a GraphTransformer that adds nodes -// for missing provisioners into the graph. -type MissingProvisionerTransformer struct { - // Provisioners is the list of provisioners we support. - Provisioners []string -} - -func (t *MissingProvisionerTransformer) Transform(g *Graph) error { - // Create a set of our supported provisioners - supported := make(map[string]struct{}, len(t.Provisioners)) - for _, v := range t.Provisioners { - supported[v] = struct{}{} - } - - // Get the map of provisioners we already have in our graph - m := provisionerVertexMap(g) - - // Go through all the provisioner consumers and make sure we add - // that provisioner if it is missing. 
- for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProvisionerConsumer) - if !ok { - continue - } - - for _, p := range pv.ProvisionedBy() { - if _, ok := m[p]; ok { - // This provisioner already exists as a configure node - continue - } - - if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, we skip it. - // Validation later will catch this as an error. - continue - } - - // Build the vertex - var newV dag.Vertex = &NodeProvisioner{ - NameValue: p, - } - - // Add the missing provisioner node to the graph - m[p] = g.Add(newV) - log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", p, dag.VertexName(v)) - } - } - - return nil -} - -// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provisioner connections that aren't needed -// anymore. A provisioner connection is not needed anymore once all depended -// resources in the graph are evaluated. -type CloseProvisionerTransformer struct{} - -func (t *CloseProvisionerTransformer) Transform(g *Graph) error { - m := closeProvisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - source := m[p] - - if source == nil { - // Create a new graphNodeCloseProvisioner and add it to the graph - source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} - g.Add(source) - - // Make sure we also add the new graphNodeCloseProvisioner to the map - // so we don't create and add any duplicate graphNodeCloseProvisioners. - m[p] = source - } - - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return nil -} - -func provisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisioner); ok { - m[pv.ProvisionerName()] = v - } - } - - return m -} - -func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvisioner); ok { - m[pv.CloseProvisionerName()] = v - } - } - - return m -} - -type graphNodeCloseProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeCloseProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) -} - -// GraphNodeEvalable impl. -func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { - return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} -} - -func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { - return n.ProvisionerNameValue -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go deleted file mode 100644 index df34a3ce..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go +++ /dev/null @@ -1,552 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/lang" - "github.com/hashicorp/terraform/states" -) - -// GraphNodeReferenceable must be implemented by any node that represents -// a Terraform thing that can be referenced (resource, module, etc.). -// -// Even if the thing has no name, this should return an empty list. 
By -// implementing this and returning a non-nil result, you say that this CAN -// be referenced and other methods of referencing may still be possible (such -// as by path!) -type GraphNodeReferenceable interface { - GraphNodeSubPath - - // ReferenceableAddrs returns a list of addresses through which this can be - // referenced. - ReferenceableAddrs() []addrs.Referenceable -} - -// GraphNodeReferencer must be implemented by nodes that reference other -// Terraform items and therefore depend on them. -type GraphNodeReferencer interface { - GraphNodeSubPath - - // References returns a list of references made by this node, which - // include both a referenced address and source location information for - // the reference. - References() []*addrs.Reference -} - -type GraphNodeAttachDependencies interface { - GraphNodeResource - AttachDependencies([]addrs.AbsResource) -} - -// GraphNodeReferenceOutside is an interface that can optionally be implemented. -// A node that implements it can specify that its own referenceable addresses -// and/or the addresses it references are in a different module than the -// node itself. -// -// Any referenceable addresses returned by ReferenceableAddrs are interpreted -// relative to the returned selfPath. -// -// Any references returned by References are interpreted relative to the -// returned referencePath. -// -// It is valid but not required for either of these paths to match what is -// returned by method Path, though if both match the main Path then there -// is no reason to implement this method. -// -// The primary use-case for this is the nodes representing module input -// variables, since their expressions are resolved in terms of their calling -// module, but they are still referenced from their own module. -type GraphNodeReferenceOutside interface { - // ReferenceOutside returns a path in which any references from this node - // are resolved. - ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) -} - -// ReferenceTransformer is a GraphTransformer that connects all the -// nodes that reference each other in order to form the proper ordering. -type ReferenceTransformer struct{} - -func (t *ReferenceTransformer) Transform(g *Graph) error { - // Build a reference map so we can efficiently look up the references - vs := g.Vertices() - m := NewReferenceMap(vs) - - // Find the things that reference things and connect them - for _, v := range vs { - parents, _ := m.References(v) - parentsDbg := make([]string, len(parents)) - for i, v := range parents { - parentsDbg[i] = dag.VertexName(v) - } - log.Printf( - "[DEBUG] ReferenceTransformer: %q references: %v", - dag.VertexName(v), parentsDbg) - - for _, parent := range parents { - g.Connect(dag.BasicEdge(v, parent)) - } - - if len(parents) > 0 { - continue - } - } - - return nil -} - -// AttachDependenciesTransformer records all resource dependencies for each -// instance, and attaches the addresses to the node itself. Managed resource -// will record these in the state for proper ordering of destroy operations. 
-type AttachDependenciesTransformer struct { - Config *configs.Config - State *states.State - Schemas *Schemas -} - -func (t AttachDependenciesTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - attacher, ok := v.(GraphNodeAttachDependencies) - if !ok { - continue - } - selfAddr := attacher.ResourceAddr() - - // Data sources don't need to track destroy dependencies - if selfAddr.Resource.Mode == addrs.DataResourceMode { - continue - } - - ans, err := g.Ancestors(v) - if err != nil { - return err - } - - // dedupe addrs when there's multiple instances involved, or - // multiple paths in the un-reduced graph - depMap := map[string]addrs.AbsResource{} - for _, d := range ans.List() { - var addr addrs.AbsResource - - switch d := d.(type) { - case GraphNodeResourceInstance: - instAddr := d.ResourceInstanceAddr() - addr = instAddr.Resource.Resource.Absolute(instAddr.Module) - case GraphNodeResource: - addr = d.ResourceAddr() - default: - continue - } - - // Data sources don't need to track destroy dependencies - if addr.Resource.Mode == addrs.DataResourceMode { - continue - } - - if addr.Equal(selfAddr) { - continue - } - depMap[addr.String()] = addr - } - - deps := make([]addrs.AbsResource, 0, len(depMap)) - for _, d := range depMap { - deps = append(deps, d) - } - sort.Slice(deps, func(i, j int) bool { - return deps[i].String() < deps[j].String() - }) - - log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s", attacher.ResourceAddr(), deps) - attacher.AttachDependencies(deps) - } - - return nil -} - -// DestroyReferenceTransformer is a GraphTransformer that reverses the edges -// for locals and outputs that depend on other nodes which will be -// removed during destroy. If a destroy node is evaluated before the local or -// output value, it will be removed from the state, and the later interpolation -// will fail. -type DestroyValueReferenceTransformer struct{} - -func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error { - vs := g.Vertices() - for _, v := range vs { - switch v.(type) { - case *NodeApplyableOutput, *NodeLocal: - // OK - default: - continue - } - - // reverse any outgoing edges so that the value is evaluated first. - for _, e := range g.EdgesFrom(v) { - target := e.Target() - - // only destroy nodes will be evaluated in reverse - if _, ok := target.(GraphNodeDestroyer); !ok { - continue - } - - log.Printf("[TRACE] output dep: %s", dag.VertexName(target)) - - g.RemoveEdge(e) - g.Connect(&DestroyEdge{S: target, T: v}) - } - } - - return nil -} - -// PruneUnusedValuesTransformer is a GraphTransformer that removes local, -// variable, and output values which are not referenced in the graph. If these -// values reference a resource that is no longer in the state the interpolation -// could fail. -type PruneUnusedValuesTransformer struct { - Destroy bool -} - -func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error { - // Pruning a value can effect previously checked edges, so loop until there - // are no more changes. 
- for removed := 0; ; removed = 0 { - for _, v := range g.Vertices() { - switch v := v.(type) { - case *NodeApplyableOutput: - // If we're not certain this is a full destroy, we need to keep any - // root module outputs - if v.Addr.Module.IsRoot() && !t.Destroy { - continue - } - case *NodeLocal, *NodeApplyableModuleVariable: - // OK - default: - // We're only concerned with variables, locals and outputs - continue - } - - dependants := g.UpEdges(v) - - switch dependants.Len() { - case 0: - // nothing at all depends on this - log.Printf("[TRACE] PruneUnusedValuesTransformer: removing unused value %s", dag.VertexName(v)) - g.Remove(v) - removed++ - case 1: - // because an output's destroy node always depends on the output, - // we need to check for the case of a single destroy node. - d := dependants.List()[0] - if _, ok := d.(*NodeDestroyableOutput); ok { - log.Printf("[TRACE] PruneUnusedValuesTransformer: removing unused value %s", dag.VertexName(v)) - g.Remove(v) - removed++ - } - } - } - if removed == 0 { - break - } - } - - return nil -} - -// ReferenceMap is a structure that can be used to efficiently check -// for references on a graph. -type ReferenceMap struct { - // vertices is a map from internal reference keys (as produced by the - // mapKey method) to one or more vertices that are identified by each key. - // - // A particular reference key might actually identify multiple vertices, - // e.g. in situations where one object is contained inside another. - vertices map[string][]dag.Vertex - - // edges is a map whose keys are a subset of the internal reference keys - // from "vertices", and whose values are the nodes that refer to each - // key. The values in this map are the referrers, while values in - // "verticies" are the referents. The keys in both cases are referents. - edges map[string][]dag.Vertex -} - -// References returns the set of vertices that the given vertex refers to, -// and any referenced addresses that do not have corresponding vertices. -func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) { - rn, ok := v.(GraphNodeReferencer) - if !ok { - return nil, nil - } - if _, ok := v.(GraphNodeSubPath); !ok { - return nil, nil - } - - var matches []dag.Vertex - var missing []addrs.Referenceable - - for _, ref := range rn.References() { - subject := ref.Subject - - key := m.referenceMapKey(v, subject) - if _, exists := m.vertices[key]; !exists { - // If what we were looking for was a ResourceInstance then we - // might be in a resource-oriented graph rather than an - // instance-oriented graph, and so we'll see if we have the - // resource itself instead. - switch ri := subject.(type) { - case addrs.ResourceInstance: - subject = ri.ContainingResource() - case addrs.ResourceInstancePhase: - subject = ri.ContainingResource() - } - key = m.referenceMapKey(v, subject) - } - - vertices := m.vertices[key] - for _, rv := range vertices { - // don't include self-references - if rv == v { - continue - } - matches = append(matches, rv) - } - if len(vertices) == 0 { - missing = append(missing, ref.Subject) - } - } - - return matches, missing -} - -// Referrers returns the set of vertices that refer to the given vertex. 
-func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex { - rn, ok := v.(GraphNodeReferenceable) - if !ok { - return nil - } - sp, ok := v.(GraphNodeSubPath) - if !ok { - return nil - } - - var matches []dag.Vertex - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(sp.Path(), addr) - referrers, ok := m.edges[key] - if !ok { - continue - } - - // If the referrer set includes our own given vertex then we skip, - // since we don't want to return self-references. - selfRef := false - for _, p := range referrers { - if p == v { - selfRef = true - break - } - } - if selfRef { - continue - } - - matches = append(matches, referrers...) - } - - return matches -} - -func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { - return fmt.Sprintf("%s|%s", path.String(), addr.String()) -} - -// vertexReferenceablePath returns the path in which the given vertex can be -// referenced. This is the path that its results from ReferenceableAddrs -// are considered to be relative to. -// -// Only GraphNodeSubPath implementations can be referenced, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { - sp, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) - } - - if outside, ok := v.(GraphNodeReferenceOutside); ok { - // Vertex is referenced from a different module than where it was - // declared. - path, _ := outside.ReferenceOutside() - return path - } - - // Vertex is referenced from the same module as where it was declared. - return sp.Path() -} - -// vertexReferencePath returns the path in which references _from_ the given -// vertex must be interpreted. -// -// Only GraphNodeSubPath implementations can have references, so this method -// will panic if the given vertex does not implement that interface. -func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { - sp, ok := referrer.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) - } - - var path addrs.ModuleInstance - if outside, ok := referrer.(GraphNodeReferenceOutside); ok { - // Vertex makes references to objects in a different module than where - // it was declared. - _, path = outside.ReferenceOutside() - return path - } - - // Vertex makes references to objects in the same module as where it - // was declared. - return sp.Path() -} - -// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex -// that the reference is from, and "addr" is the address of the object being -// referenced. -// -// The result is an opaque string that includes both the address of the given -// object and the address of the module instance that object belongs to. -// -// Only GraphNodeSubPath implementations can be referrers, so this method will -// panic if the given vertex does not implement that interface. -func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { - path := vertexReferencePath(referrer) - return m.mapKey(path, addr) -} - -// NewReferenceMap is used to create a new reference map for the -// given set of vertices. 
-func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { - var m ReferenceMap - - // Build the lookup table - vertices := make(map[string][]dag.Vertex) - for _, v := range vs { - _, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - continue - } - - // We're only looking for referenceable nodes - rn, ok := v.(GraphNodeReferenceable) - if !ok { - continue - } - - path := m.vertexReferenceablePath(v) - - // Go through and cache them - for _, addr := range rn.ReferenceableAddrs() { - key := m.mapKey(path, addr) - vertices[key] = append(vertices[key], v) - } - - // Any node can be referenced by the address of the module it belongs - // to or any of that module's ancestors. - for _, addr := range path.Ancestors()[1:] { - // Can be referenced either as the specific call instance (with - // an instance key) or as the bare module call itself (the "module" - // block in the parent module that created the instance). - callPath, call := addr.Call() - callInstPath, callInst := addr.CallInstance() - callKey := m.mapKey(callPath, call) - callInstKey := m.mapKey(callInstPath, callInst) - vertices[callKey] = append(vertices[callKey], v) - vertices[callInstKey] = append(vertices[callInstKey], v) - } - } - - // Build the lookup table for referenced by - edges := make(map[string][]dag.Vertex) - for _, v := range vs { - _, ok := v.(GraphNodeSubPath) - if !ok { - // Only nodes with paths can participate in a reference map. - continue - } - - rn, ok := v.(GraphNodeReferencer) - if !ok { - // We're only looking for referenceable nodes - continue - } - - // Go through and cache them - for _, ref := range rn.References() { - if ref.Subject == nil { - // Should never happen - panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) - } - key := m.referenceMapKey(v, ref.Subject) - edges[key] = append(edges[key], v) - } - } - - m.vertices = vertices - m.edges = edges - return &m -} - -// ReferencesFromConfig returns the references that a configuration has -// based on the interpolated variables in a configuration. -func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { - if body == nil { - return nil - } - refs, _ := lang.ReferencesInBlock(body, schema) - return refs -} - -// appendResourceDestroyReferences identifies resource and resource instance -// references in the given slice and appends to it the "destroy-phase" -// equivalents of those references, returning the result. -// -// This can be used in the References implementation for a node which must also -// depend on the destruction of anything it references. 
-func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { - given := refs - for _, ref := range given { - switch tr := ref.Subject.(type) { - case addrs.Resource: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - case addrs.ResourceInstance: - newRef := *ref // shallow copy - newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) - refs = append(refs, &newRef) - } - } - return refs -} - -func modulePrefixStr(p addrs.ModuleInstance) string { - return p.String() -} - -func modulePrefixList(result []string, prefix string) []string { - if prefix != "" { - for i, v := range result { - result[i] = fmt.Sprintf("%s.%s", prefix, v) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go deleted file mode 100644 index ee71387e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go +++ /dev/null @@ -1,33 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/states" -) - -// RemovedModuleTransformer implements GraphTransformer to add nodes indicating -// when a module was removed from the configuration. -type RemovedModuleTransformer struct { - Config *configs.Config // root node in the config tree - State *states.State -} - -func (t *RemovedModuleTransformer) Transform(g *Graph) error { - // nothing to remove if there's no state! - if t.State == nil { - return nil - } - - for _, m := range t.State.Modules { - cc := t.Config.DescendentForInstance(m.Addr) - if cc != nil { - continue - } - - log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) - g.Add(&NodeModuleRemoved{Addr: m.Addr}) - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go deleted file mode 100644 index c70a3c14..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go +++ /dev/null @@ -1,71 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/dag" - "github.com/zclconf/go-cty/cty" -) - -// ResourceCountTransformer is a GraphTransformer that expands the count -// out for a specific resource. -// -// This assumes that the count is already interpolated. -type ResourceCountTransformer struct { - Concrete ConcreteResourceInstanceNodeFunc - Schema *configschema.Block - - // Count is either the number of indexed instances to create, or -1 to - // indicate that count is not set at all and thus a no-key instance should - // be created. - Count int - ForEach map[string]cty.Value - Addr addrs.AbsResource -} - -func (t *ResourceCountTransformer) Transform(g *Graph) error { - if t.Count < 0 && t.ForEach == nil { - // Negative count indicates that count is not set at all. 
- addr := t.Addr.Instance(addrs.NoKey) - - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - return nil - } - - // Add nodes related to the for_each expression - for key := range t.ForEach { - addr := t.Addr.Instance(addrs.StringKey(key)) - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - // For each count, build and add the node - for i := 0; i < t.Count; i++ { - key := addrs.IntKey(i) - addr := t.Addr.Instance(key) - - abstract := NewNodeAbstractResourceInstance(addr) - abstract.Schema = t.Schema - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) - } - - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go deleted file mode 100644 index aee053d1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go +++ /dev/null @@ -1,38 +0,0 @@ -package terraform - -import "github.com/hashicorp/terraform/dag" - -const rootNodeName = "root" - -// RootTransformer is a GraphTransformer that adds a root to the graph. -type RootTransformer struct{} - -func (t *RootTransformer) Transform(g *Graph) error { - // If we already have a good root, we're done - if _, err := g.Root(); err == nil { - return nil - } - - // Add a root - var root graphNodeRoot - g.Add(root) - - // Connect the root to all the edges that need it - for _, v := range g.Vertices() { - if v == root { - continue - } - - if g.UpEdges(v).Len() == 0 { - g.Connect(dag.BasicEdge(root, v)) - } - } - - return nil -} - -type graphNodeRoot struct{} - -func (n graphNodeRoot) Name() string { - return rootNodeName -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go deleted file mode 100644 index 0b52347d..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go +++ /dev/null @@ -1,74 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/states" -) - -// StateTransformer is a GraphTransformer that adds the elements of -// the state to the graph. -// -// This transform is used for example by the DestroyPlanGraphBuilder to ensure -// that only resources that are in the state are represented in the graph. -type StateTransformer struct { - // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract - // resource instance nodes that this transformer will create. - // - // If either of these is nil, the objects of that type will be skipped and - // not added to the graph at all. It doesn't make sense to use this - // transformer without setting at least one of these, since that would - // skip everything and thus be a no-op. 
- ConcreteCurrent ConcreteResourceInstanceNodeFunc - ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc - - State *states.State -} - -func (t *StateTransformer) Transform(g *Graph) error { - if !t.State.HasResources() { - log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do") - return nil - } - - switch { - case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") - case t.ConcreteCurrent != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") - case t.ConcreteDeposed != nil: - log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") - default: - log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") - } - - for _, ms := range t.State.Modules { - moduleAddr := ms.Addr - - for _, rs := range ms.Resources { - resourceAddr := rs.Addr.Absolute(moduleAddr) - - for key, is := range rs.Instances { - addr := resourceAddr.Instance(key) - - if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteCurrent(abstract) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) - } - - if t.ConcreteDeposed != nil { - for dk := range is.Deposed { - abstract := NewNodeAbstractResourceInstance(addr) - node := t.ConcreteDeposed(abstract, dk) - g.Add(node) - log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) - } - } - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go deleted file mode 100644 index d25274e6..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go +++ /dev/null @@ -1,267 +0,0 @@ -package terraform - -import ( - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeTargetable is an interface for graph nodes to implement when they -// need to be told about incoming targets. This is useful for nodes that need -// to respect targets as they dynamically expand. Note that the list of targets -// provided will contain every target provided, and each implementing graph -// node must filter this list to targets considered relevant. -type GraphNodeTargetable interface { - SetTargets([]addrs.Targetable) -} - -// GraphNodeTargetDownstream is an interface for graph nodes that need to -// be remain present under targeting if any of their dependencies are targeted. -// TargetDownstream is called with the set of vertices that are direct -// dependencies for the node, and it should return true if the node must remain -// in the graph in support of those dependencies. -// -// This is used in situations where the dependency edges are representing an -// ordering relationship but the dependency must still be visited if its -// dependencies are visited. This is true for outputs, for example, since -// they must get updated if any of their dependent resources get updated, -// which would not normally be true if one of their dependencies were targeted. 
-type GraphNodeTargetDownstream interface { - TargetDownstream(targeted, untargeted *dag.Set) bool -} - -// TargetsTransformer is a GraphTransformer that, when the user specifies a -// list of resources to target, limits the graph to only those resources and -// their dependencies. -type TargetsTransformer struct { - // List of targeted resource names specified by the user - Targets []addrs.Targetable - - // If set, the index portions of resource addresses will be ignored - // for comparison. This is used when transforming a graph where - // counted resources have not yet been expanded, since otherwise - // the unexpanded nodes (which never have indices) would not match. - IgnoreIndices bool - - // Set to true when we're in a `terraform destroy` or a - // `terraform plan -destroy` - Destroy bool -} - -func (t *TargetsTransformer) Transform(g *Graph) error { - if len(t.Targets) > 0 { - targetedNodes, err := t.selectTargetedNodes(g, t.Targets) - if err != nil { - return err - } - - for _, v := range g.Vertices() { - removable := false - if _, ok := v.(GraphNodeResource); ok { - removable = true - } - - if vr, ok := v.(RemovableIfNotTargeted); ok { - removable = vr.RemoveIfNotTargeted() - } - - if removable && !targetedNodes.Include(v) { - log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) - g.Remove(v) - } - } - } - - return nil -} - -// Returns a set of targeted nodes. A targeted node is either addressed -// directly, address indirectly via its container, or it's a dependency of a -// targeted node. Destroy mode keeps dependents instead of dependencies. -func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) { - targetedNodes := new(dag.Set) - - vertices := g.Vertices() - - for _, v := range vertices { - if t.nodeIsTarget(v, addrs) { - targetedNodes.Add(v) - - // We inform nodes that ask about the list of targets - helps for nodes - // that need to dynamically expand. Note that this only occurs for nodes - // that are already directly targeted. - if tn, ok := v.(GraphNodeTargetable); ok { - tn.SetTargets(addrs) - } - - var deps *dag.Set - var err error - if t.Destroy { - deps, err = g.Descendents(v) - } else { - deps, err = g.Ancestors(v) - } - if err != nil { - return nil, err - } - - for _, d := range deps.List() { - targetedNodes.Add(d) - } - } - } - return t.addDependencies(targetedNodes, g) -} - -func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) { - // Handle nodes that need to be included if their dependencies are included. - // This requires multiple passes since we need to catch transitive - // dependencies if and only if they are via other nodes that also - // support TargetDownstream. For example: - // output -> output -> targeted-resource: both outputs need to be targeted - // output -> non-targeted-resource -> targeted-resource: output not targeted - // - // We'll keep looping until we stop targeting more nodes. - queue := targetedNodes.List() - for len(queue) > 0 { - vertices := queue - queue = nil // ready to append for next iteration if neccessary - for _, v := range vertices { - // providers don't cause transitive dependencies, so don't target - // downstream from them. - if _, ok := v.(GraphNodeProvider); ok { - continue - } - - dependers := g.UpEdges(v) - if dependers == nil { - // indicates that there are no up edges for this node, so - // we have nothing to do here. 
- continue - } - - dependers = dependers.Filter(func(dv interface{}) bool { - _, ok := dv.(GraphNodeTargetDownstream) - return ok - }) - - if dependers.Len() == 0 { - continue - } - - for _, dv := range dependers.List() { - if targetedNodes.Include(dv) { - // Already present, so nothing to do - continue - } - - // We'll give the node some information about what it's - // depending on in case that informs its decision about whether - // it is safe to be targeted. - deps := g.DownEdges(v) - - depsTargeted := deps.Intersection(targetedNodes) - depsUntargeted := deps.Difference(depsTargeted) - - if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { - targetedNodes.Add(dv) - // Need to visit this node on the next pass to see if it - // has any transitive dependers. - queue = append(queue, dv) - } - } - } - } - - return targetedNodes.Filter(func(dv interface{}) bool { - return filterPartialOutputs(dv, targetedNodes, g) - }), nil -} - -// Outputs may have been included transitively, but if any of their -// dependencies have been pruned they won't be resolvable. -// If nothing depends on the output, and the output is missing any -// dependencies, remove it from the graph. -// This essentially maintains the previous behavior where interpolation in -// outputs would fail silently, but can now surface errors where the output -// is required. -func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { - // should this just be done with TargetDownstream? - if _, ok := v.(*NodeApplyableOutput); !ok { - return true - } - - dependers := g.UpEdges(v) - for _, d := range dependers.List() { - if _, ok := d.(*NodeCountBoundary); ok { - continue - } - - if !targetedNodes.Include(d) { - // this one is going to be removed, so it doesn't count - continue - } - - // as soon as we see a real dependency, we mark this as - // non-removable - return true - } - - depends := g.DownEdges(v) - - for _, d := range depends.List() { - if !targetedNodes.Include(d) { - log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", - dag.VertexName(v), dag.VertexName(d)) - return false - } - } - return true -} - -func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { - var vertexAddr addrs.Targetable - switch r := v.(type) { - case GraphNodeResourceInstance: - vertexAddr = r.ResourceInstanceAddr() - case GraphNodeResource: - vertexAddr = r.ResourceAddr() - default: - // Only resource and resource instance nodes can be targeted. - return false - } - _, ok := v.(GraphNodeResource) - if !ok { - return false - } - - for _, targetAddr := range targets { - if t.IgnoreIndices { - // If we're ignoring indices then we'll convert any resource instance - // addresses into resource addresses. We don't need to convert - // vertexAddr because instance addresses are contained within - // their associated resources, and so .TargetContains will take - // care of this for us. - if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { - targetAddr = instance.ContainingResource() - } - } - if targetAddr.TargetContains(vertexAddr) { - return true - } - } - - return false -} - -// RemovableIfNotTargeted is a special interface for graph nodes that -// aren't directly addressable, but need to be removed from the graph when they -// are not targeted. (Nodes that are not directly targeted end up in the set of -// targeted nodes because something that _is_ targeted depends on them.) 
The -// initial use case for this interface is GraphNodeConfigVariable, which was -// having trouble interpolating for module variables in targeted scenarios that -// filtered out the resource node being referenced. -type RemovableIfNotTargeted interface { - RemoveIfNotTargeted() bool -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go deleted file mode 100644 index 21842789..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -// TransitiveReductionTransformer is a GraphTransformer that performs -// finds the transitive reduction of the graph. For a definition of -// transitive reduction, see Wikipedia. -type TransitiveReductionTransformer struct{} - -func (t *TransitiveReductionTransformer) Transform(g *Graph) error { - // If the graph isn't valid, skip the transitive reduction. - // We don't error here because Terraform itself handles graph - // validation in a better way, or we assume it does. - if err := g.Validate(); err != nil { - return nil - } - - // Do it - g.TransitiveReduction() - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go deleted file mode 100644 index 05daa513..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go +++ /dev/null @@ -1,40 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" -) - -// RootVariableTransformer is a GraphTransformer that adds all the root -// variables to the graph. -// -// Root variables are currently no-ops but they must be added to the -// graph since downstream things that depend on them must be able to -// reach them. -type RootVariableTransformer struct { - Config *configs.Config -} - -func (t *RootVariableTransformer) Transform(g *Graph) error { - // We can have no variables if we have no config. - if t.Config == nil { - return nil - } - - // We're only considering root module variables here, since child - // module variables are handled by ModuleVariableTransformer. - vars := t.Config.Module.Variables - - // Add all variables here - for _, v := range vars { - node := &NodeRootVariable{ - Addr: addrs.InputVariable{ - Name: v.Name, - }, - Config: v, - } - g.Add(node) - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go deleted file mode 100644 index 6b1293fc..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// VertexTransformer is a GraphTransformer that transforms vertices -// using the GraphVertexTransformers. The Transforms are run in sequential -// order. If a transform replaces a vertex then the next transform will see -// the new vertex. 
-type VertexTransformer struct { - Transforms []GraphVertexTransformer -} - -func (t *VertexTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - for _, vt := range t.Transforms { - newV, err := vt.Transform(v) - if err != nil { - return err - } - - // If the vertex didn't change, then don't do anything more - if newV == v { - continue - } - - // Vertex changed, replace it within the graph - if ok := g.Replace(v, newV); !ok { - // This should never happen, big problem - return fmt.Errorf( - "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", - dag.VertexName(v), dag.VertexName(newV), v, newV) - } - - // Replace v so that future transforms use the proper vertex - v = newV - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go deleted file mode 100644 index f6790d9e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -import "context" - -// UIInput is the interface that must be implemented to ask for input -// from this user. This should forward the request to wherever the user -// inputs things to ask for values. -type UIInput interface { - Input(context.Context, *InputOpts) (string, error) -} - -// InputOpts are options for asking for input. -type InputOpts struct { - // Id is a unique ID for the question being asked that might be - // used for logging or to look up a prior answered question. - Id string - - // Query is a human-friendly question for inputting this value. - Query string - - // Description is a description about what this option is. Be wary - // that this will probably be in a terminal so split lines as you see - // necessary. - Description string - - // Default will be the value returned if no data is entered. - Default string -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go deleted file mode 100644 index e2d9c384..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go +++ /dev/null @@ -1,25 +0,0 @@ -package terraform - -import "context" - -// MockUIInput is an implementation of UIInput that can be used for tests. -type MockUIInput struct { - InputCalled bool - InputOpts *InputOpts - InputReturnMap map[string]string - InputReturnString string - InputReturnError error - InputFn func(*InputOpts) (string, error) -} - -func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - i.InputCalled = true - i.InputOpts = opts - if i.InputFn != nil { - return i.InputFn(opts) - } - if i.InputReturnMap != nil { - return i.InputReturnMap[opts.Id], i.InputReturnError - } - return i.InputReturnString, i.InputReturnError -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go deleted file mode 100644 index b5d32b1e..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go +++ /dev/null @@ -1,20 +0,0 @@ -package terraform - -import ( - "context" - "fmt" -) - -// PrefixUIInput is an implementation of UIInput that prefixes the ID -// with a string, allowing queries to be namespaced. 
-type PrefixUIInput struct { - IdPrefix string - QueryPrefix string - UIInput UIInput -} - -func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { - opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) - opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(ctx, opts) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go deleted file mode 100644 index 84427c63..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// UIOutput is the interface that must be implemented to output -// data to the end user. -type UIOutput interface { - Output(string) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go deleted file mode 100644 index 135a91c5..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go +++ /dev/null @@ -1,9 +0,0 @@ -package terraform - -type CallbackUIOutput struct { - OutputFn func(string) -} - -func (o *CallbackUIOutput) Output(v string) { - o.OutputFn(v) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go deleted file mode 100644 index d828c921..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ /dev/null @@ -1,21 +0,0 @@ -package terraform - -import "sync" - -// MockUIOutput is an implementation of UIOutput that can be used for tests. -type MockUIOutput struct { - sync.Mutex - OutputCalled bool - OutputMessage string - OutputFn func(string) -} - -func (o *MockUIOutput) Output(v string) { - o.Lock() - defer o.Unlock() - o.OutputCalled = true - o.OutputMessage = v - if o.OutputFn != nil { - o.OutputFn(v) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go deleted file mode 100644 index fff964f4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go +++ /dev/null @@ -1,19 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/addrs" -) - -// ProvisionerUIOutput is an implementation of UIOutput that calls a hook -// for the output so that the hooks can handle it. 
-type ProvisionerUIOutput struct { - InstanceAddr addrs.AbsResourceInstance - ProvisionerType string - Hooks []Hook -} - -func (o *ProvisionerUIOutput) Output(msg string) { - for _, h := range o.Hooks { - h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go deleted file mode 100644 index 97f1ec1f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go +++ /dev/null @@ -1,12 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/httpclient" -) - -// Generate a UserAgent string -// -// Deprecated: Use httpclient.UserAgent(version) instead -func UserAgentString() string { - return httpclient.UserAgentString() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go deleted file mode 100644 index 5428cd5a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/util.go +++ /dev/null @@ -1,75 +0,0 @@ -package terraform - -import ( - "sort" -) - -// Semaphore is a wrapper around a channel to provide -// utility methods to clarify that we are treating the -// channel as a semaphore -type Semaphore chan struct{} - -// NewSemaphore creates a semaphore that allows up -// to a given limit of simultaneous acquisitions -func NewSemaphore(n int) Semaphore { - if n == 0 { - panic("semaphore with limit 0") - } - ch := make(chan struct{}, n) - return Semaphore(ch) -} - -// Acquire is used to acquire an available slot. -// Blocks until available. -func (s Semaphore) Acquire() { - s <- struct{}{} -} - -// TryAcquire is used to do a non-blocking acquire. -// Returns a bool indicating success -func (s Semaphore) TryAcquire() bool { - select { - case s <- struct{}{}: - return true - default: - return false - } -} - -// Release is used to return a slot. Acquire must -// be called as a pre-condition. -func (s Semaphore) Release() { - select { - case <-s: - default: - panic("release without an acquire") - } -} - -// strSliceContains checks if a given string is contained in a slice -// When anybody asks why Go needs generics, here you go. -func strSliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// deduplicate a slice of strings -func uniqueStrings(s []string) []string { - if len(s) < 2 { - return s - } - - sort.Strings(s) - result := make([]string, 1, len(s)) - result[0] = s[0] - for i := 1; i < len(s); i++ { - if s[i] != result[len(result)-1] { - result = append(result, s[i]) - } - } - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go deleted file mode 100644 index 627593d7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[ValueFromUnknown-0] - _ = x[ValueFromConfig-67] - _ = x[ValueFromAutoFile-70] - _ = x[ValueFromNamedFile-78] - _ = x[ValueFromCLIArg-65] - _ = x[ValueFromEnvVar-69] - _ = x[ValueFromInput-73] - _ = x[ValueFromPlan-80] - _ = x[ValueFromCaller-83] -} - -const ( - _ValueSourceType_name_0 = "ValueFromUnknown" - _ValueSourceType_name_1 = "ValueFromCLIArg" - _ValueSourceType_name_2 = "ValueFromConfig" - _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" - _ValueSourceType_name_4 = "ValueFromInput" - _ValueSourceType_name_5 = "ValueFromNamedFile" - _ValueSourceType_name_6 = "ValueFromPlan" - _ValueSourceType_name_7 = "ValueFromCaller" -) - -var ( - _ValueSourceType_index_3 = [...]uint8{0, 15, 32} -) - -func (i ValueSourceType) String() string { - switch { - case i == 0: - return _ValueSourceType_name_0 - case i == 65: - return _ValueSourceType_name_1 - case i == 67: - return _ValueSourceType_name_2 - case 69 <= i && i <= 70: - i -= 69 - return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] - case i == 73: - return _ValueSourceType_name_4 - case i == 78: - return _ValueSourceType_name_5 - case i == 80: - return _ValueSourceType_name_6 - case i == 83: - return _ValueSourceType_name_7 - default: - return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go deleted file mode 100644 index 14f6a3cc..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/variables.go +++ /dev/null @@ -1,313 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/tfdiags" -) - -// InputValue represents a value for a variable in the root module, provided -// as part of the definition of an operation. -type InputValue struct { - Value cty.Value - SourceType ValueSourceType - - // SourceRange provides source location information for values whose - // SourceType is either ValueFromConfig or ValueFromFile. It is not - // populated for other source types, and so should not be used. - SourceRange tfdiags.SourceRange -} - -// ValueSourceType describes what broad category of source location provided -// a particular value. -type ValueSourceType rune - -const ( - // ValueFromUnknown is the zero value of ValueSourceType and is not valid. - ValueFromUnknown ValueSourceType = 0 - - // ValueFromConfig indicates that a value came from a .tf or .tf.json file, - // e.g. the default value defined for a variable. - ValueFromConfig ValueSourceType = 'C' - - // ValueFromAutoFile indicates that a value came from a "values file", like - // a .tfvars file, that was implicitly loaded by naming convention. - ValueFromAutoFile ValueSourceType = 'F' - - // ValueFromNamedFile indicates that a value came from a named "values file", - // like a .tfvars file, that was passed explicitly on the command line (e.g. - // -var-file=foo.tfvars). - ValueFromNamedFile ValueSourceType = 'N' - - // ValueFromCLIArg indicates that the value was provided directly in - // a CLI argument. The name of this argument is not recorded and so it must - // be inferred from context. - ValueFromCLIArg ValueSourceType = 'A' - - // ValueFromEnvVar indicates that the value was provided via an environment - // variable. 
The name of the variable is not recorded and so it must be - // inferred from context. - ValueFromEnvVar ValueSourceType = 'E' - - // ValueFromInput indicates that the value was provided at an interactive - // input prompt. - ValueFromInput ValueSourceType = 'I' - - // ValueFromPlan indicates that the value was retrieved from a stored plan. - ValueFromPlan ValueSourceType = 'P' - - // ValueFromCaller indicates that the value was explicitly overridden by - // a caller to Context.SetVariable after the context was constructed. - ValueFromCaller ValueSourceType = 'S' -) - -func (v *InputValue) GoString() string { - if (v.SourceRange != tfdiags.SourceRange{}) { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) - } else { - return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) - } -} - -func (v ValueSourceType) GoString() string { - return fmt.Sprintf("terraform.%s", v) -} - -//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType - -// InputValues is a map of InputValue instances. -type InputValues map[string]*InputValue - -// InputValuesFromCaller turns the given map of naked values into an -// InputValues that attributes each value to "a caller", using the source -// type ValueFromCaller. This is primarily useful for testing purposes. -// -// This should not be used as a general way to convert map[string]cty.Value -// into InputValues, since in most real cases we want to set a suitable -// other SourceType and possibly SourceRange value. -func InputValuesFromCaller(vals map[string]cty.Value) InputValues { - ret := make(InputValues, len(vals)) - for k, v := range vals { - ret[k] = &InputValue{ - Value: v, - SourceType: ValueFromCaller, - } - } - return ret -} - -// Override merges the given value maps with the receiver, overriding any -// conflicting keys so that the latest definition wins. -func (vv InputValues) Override(others ...InputValues) InputValues { - // FIXME: This should check to see if any of the values are maps and - // merge them if so, in order to preserve the behavior from prior to - // Terraform 0.12. - ret := make(InputValues) - for k, v := range vv { - ret[k] = v - } - for _, other := range others { - for k, v := range other { - ret[k] = v - } - } - return ret -} - -// JustValues returns a map that just includes the values, discarding the -// source information. -func (vv InputValues) JustValues() map[string]cty.Value { - ret := make(map[string]cty.Value, len(vv)) - for k, v := range vv { - ret[k] = v.Value - } - return ret -} - -// DefaultVariableValues returns an InputValues map representing the default -// values specified for variables in the given configuration map. -func DefaultVariableValues(configs map[string]*configs.Variable) InputValues { - ret := make(InputValues) - for k, c := range configs { - if c.Default == cty.NilVal { - continue - } - ret[k] = &InputValue{ - Value: c.Default, - SourceType: ValueFromConfig, - SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange), - } - } - return ret -} - -// SameValues returns true if the given InputValues has the same values as -// the receiever, disregarding the source types and source ranges. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. 
-func (vv InputValues) SameValues(other InputValues) bool { - if len(vv) != len(other) { - return false - } - - for k, v := range vv { - ov, exists := other[k] - if !exists { - return false - } - if !v.Value.RawEquals(ov.Value) { - return false - } - } - - return true -} - -// HasValues returns true if the reciever has the same values as in the given -// map, disregarding the source types and source ranges. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -func (vv InputValues) HasValues(vals map[string]cty.Value) bool { - if len(vv) != len(vals) { - return false - } - - for k, v := range vv { - oVal, exists := vals[k] - if !exists { - return false - } - if !v.Value.RawEquals(oVal) { - return false - } - } - - return true -} - -// Identical returns true if the given InputValues has the same values, -// source types, and source ranges as the receiver. -// -// Values are compared using the cty "RawEquals" method, which means that -// unknown values can be considered equal to one another if they are of the -// same type. -// -// This method is primarily for testing. For most practical purposes, it's -// better to use SameValues or HasValues. -func (vv InputValues) Identical(other InputValues) bool { - if len(vv) != len(other) { - return false - } - - for k, v := range vv { - ov, exists := other[k] - if !exists { - return false - } - if !v.Value.RawEquals(ov.Value) { - return false - } - if v.SourceType != ov.SourceType { - return false - } - if v.SourceRange != ov.SourceRange { - return false - } - } - - return true -} - -// checkInputVariables ensures that variable values supplied at the UI conform -// to their corresponding declarations in configuration. -// -// The set of values is considered valid only if the returned diagnostics -// does not contain errors. A valid set of values may still produce warnings, -// which should be returned to the user. -func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { - var diags tfdiags.Diagnostics - - for name, vc := range vcs { - val, isSet := vs[name] - if !isSet { - // Always an error, since the caller should already have included - // default values from the configuration in the values map. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unassigned variable", - fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name), - )) - continue - } - - wantType := vc.Type - - // A given value is valid if it can convert to the desired type. - _, err := convert.Convert(val.Value, wantType) - if err != nil { - switch val.SourceType { - case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: - // We have source location information for these. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid value for input variable", - Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), - Subject: val.SourceRange.ToHCL().Ptr(), - }) - case ValueFromEnvVar: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromCLIArg: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), - )) - case ValueFromInput: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), - )) - default: - // The above gets us good coverage for the situations users - // are likely to encounter with their own inputs. The other - // cases are generally implementation bugs, so we'll just - // use a generic error for these. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Invalid value for input variable", - fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), - )) - } - } - } - - // Check for any variables that are assigned without being configured. - // This is always an implementation error in the caller, because we - // expect undefined variables to be caught during context construction - // where there is better context to report it well. - for name := range vs { - if _, defined := vcs[name]; !defined { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Value assigned to undeclared variable", - fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), - )) - } - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go deleted file mode 100644 index 0caeca0a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ /dev/null @@ -1,10 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/version" -) - -// Deprecated: Providers should use schema.Provider.TerraformVersion instead -func VersionString() string { - return version.String() -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go deleted file mode 100644 index ba9af1d1..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/tfdiags" - - "github.com/hashicorp/terraform/configs" - - tfversion "github.com/hashicorp/terraform/version" -) - -// CheckCoreVersionRequirements visits each of the modules in the given -// configuration tree and verifies that any given Core version constraints -// match with the version of Terraform Core that is being used. -// -// The returned diagnostics will contain errors if any constraints do not match. -// The returned diagnostics might also return warnings, which should be -// displayed to the user. 
-func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { - if config == nil { - return nil - } - - var diags tfdiags.Diagnostics - module := config.Module - - for _, constraint := range module.CoreVersionConstraints { - if !constraint.Required.Check(tfversion.SemVer) { - switch { - case len(config.Path) == 0: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - default: - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported Terraform Core version", - Detail: fmt.Sprintf( - "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", - config.Path, config.SourceAddr, tfversion.String(), - ), - Subject: &constraint.DeclRange, - }) - } - } - } - - for _, c := range config.Children { - childDiags := CheckCoreVersionRequirements(c) - diags = diags.Append(childDiags) - } - - return diags -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go deleted file mode 100644 index 0666aa5f..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. - -package terraform - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[walkInvalid-0] - _ = x[walkApply-1] - _ = x[walkPlan-2] - _ = x[walkPlanDestroy-3] - _ = x[walkRefresh-4] - _ = x[walkValidate-5] - _ = x[walkDestroy-6] - _ = x[walkImport-7] - _ = x[walkEval-8] -} - -const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" - -var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} - -func (i walkOperation) String() string { - if i >= walkOperation(len(_walkOperation_index)-1) { - return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go deleted file mode 100644 index 8e41f46e..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go +++ /dev/null @@ -1,68 +0,0 @@ -package tfdiags - -import ( - "bytes" - "fmt" - "strconv" - - "github.com/zclconf/go-cty/cty" -) - -// FormatCtyPath is a helper function to produce a user-friendly string -// representation of a cty.Path. The result uses a syntax similar to the -// HCL expression language in the hope of it being familiar to users. 
-func FormatCtyPath(path cty.Path) string { - var buf bytes.Buffer - for _, step := range path { - switch ts := step.(type) { - case cty.GetAttrStep: - fmt.Fprintf(&buf, ".%s", ts.Name) - case cty.IndexStep: - buf.WriteByte('[') - key := ts.Key - keyTy := key.Type() - switch { - case key.IsNull(): - buf.WriteString("null") - case !key.IsKnown(): - buf.WriteString("(not yet known)") - case keyTy == cty.Number: - bf := key.AsBigFloat() - buf.WriteString(bf.Text('g', -1)) - case keyTy == cty.String: - buf.WriteString(strconv.Quote(key.AsString())) - default: - buf.WriteString("...") - } - buf.WriteByte(']') - } - } - return buf.String() -} - -// FormatError is a helper function to produce a user-friendly string -// representation of certain special error types that we might want to -// include in diagnostic messages. -// -// This currently has special behavior only for cty.PathError, where a -// non-empty path is rendered in a HCL-like syntax as context. -func FormatError(err error) string { - perr, ok := err.(cty.PathError) - if !ok || len(perr.Path) == 0 { - return err.Error() - } - - return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) -} - -// FormatErrorPrefixed is like FormatError except that it presents any path -// information after the given prefix string, which is assumed to contain -// an HCL syntax representation of the value that errors are relative to. -func FormatErrorPrefixed(err error, prefix string) string { - perr, ok := err.(cty.PathError) - if !ok || len(perr.Path) == 0 { - return fmt.Sprintf("%s: %s", prefix, err.Error()) - } - - return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error()) -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go b/vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go deleted file mode 100644 index 06f3d52c..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/consolidate_warnings.go +++ /dev/null @@ -1,146 +0,0 @@ -package tfdiags - -import "fmt" - -// ConsolidateWarnings checks if there is an unreasonable amount of warnings -// with the same summary in the receiver and, if so, returns a new diagnostics -// with some of those warnings consolidated into a single warning in order -// to reduce the verbosity of the output. -// -// This mechanism is here primarily for diagnostics printed out at the CLI. In -// other contexts it is likely better to just return the warnings directly, -// particularly if they are going to be interpreted by software rather than -// by a human reader. -// -// The returned slice always has a separate backing array from the reciever, -// but some diagnostic values themselves might be shared. -// -// The definition of "unreasonable" is given as the threshold argument. At most -// that many warnings with the same summary will be shown. -func (diags Diagnostics) ConsolidateWarnings(threshold int) Diagnostics { - if len(diags) == 0 { - return nil - } - - newDiags := make(Diagnostics, 0, len(diags)) - - // We'll track how many times we've seen each warning summary so we can - // decide when to start consolidating. Once we _have_ started consolidating, - // we'll also track the object representing the consolidated warning - // so we can continue appending to it. 
- warningStats := make(map[string]int) - warningGroups := make(map[string]*warningGroup) - - for _, diag := range diags { - severity := diag.Severity() - if severity != Warning || diag.Source().Subject == nil { - // Only warnings can get special treatment, and we only - // consolidate warnings that have source locations because - // our primary goal here is to deal with the situation where - // some configuration language feature is producing a warning - // each time it's used across a potentially-large config. - newDiags = newDiags.Append(diag) - continue - } - - desc := diag.Description() - summary := desc.Summary - if g, ok := warningGroups[summary]; ok { - // We're already grouping this one, so we'll just continue it. - g.Append(diag) - continue - } - - warningStats[summary]++ - if warningStats[summary] == threshold { - // Initially creating the group doesn't really change anything - // visibly in the result, since a group with only one warning - // is just a passthrough anyway, but once we do this any additional - // warnings with the same summary will get appended to this group. - g := &warningGroup{} - newDiags = newDiags.Append(g) - warningGroups[summary] = g - g.Append(diag) - continue - } - - // If this warning is not consolidating yet then we'll just append - // it directly. - newDiags = newDiags.Append(diag) - } - - return newDiags -} - -// A warningGroup is one or more warning diagnostics grouped together for -// UI consolidation purposes. -// -// A warningGroup with only one diagnostic in it is just a passthrough for -// that one diagnostic. If it has more than one then it will behave mostly -// like the first one but its detail message will include an additional -// sentence mentioning the consolidation. A warningGroup with no diagnostics -// at all is invalid and will panic when used. -type warningGroup struct { - Warnings Diagnostics -} - -var _ Diagnostic = (*warningGroup)(nil) - -func (wg *warningGroup) Severity() Severity { - return wg.Warnings[0].Severity() -} - -func (wg *warningGroup) Description() Description { - desc := wg.Warnings[0].Description() - if len(wg.Warnings) < 2 { - return desc - } - extraCount := len(wg.Warnings) - 1 - var msg string - switch extraCount { - case 1: - msg = "(and one more similar warning elsewhere)" - default: - msg = fmt.Sprintf("(and %d more similar warnings elsewhere)", extraCount) - } - if desc.Detail != "" { - desc.Detail = desc.Detail + "\n\n" + msg - } else { - desc.Detail = msg - } - return desc -} - -func (wg *warningGroup) Source() Source { - return wg.Warnings[0].Source() -} - -func (wg *warningGroup) FromExpr() *FromExpr { - return wg.Warnings[0].FromExpr() -} - -func (wg *warningGroup) Append(diag Diagnostic) { - if diag.Severity() != Warning { - panic("can't append a non-warning diagnostic to a warningGroup") - } - wg.Warnings = append(wg.Warnings, diag) -} - -// WarningGroupSourceRanges can be used in conjunction with -// Diagnostics.ConsolidateWarnings to recover the full set of original source -// locations from a consolidated warning. -// -// For convenience, this function accepts any diagnostic and will just return -// the single Source value from any diagnostic that isn't a warning group. 
-func WarningGroupSourceRanges(diag Diagnostic) []Source { - wg, ok := diag.(*warningGroup) - if !ok { - return []Source{diag.Source()} - } - - ret := make([]Source, len(wg.Warnings)) - for i, wrappedDiag := range wg.Warnings { - ret[i] = wrappedDiag.Source() - } - return ret -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go deleted file mode 100644 index d55bc2f0..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go +++ /dev/null @@ -1,372 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/gocty" -) - -// The "contextual" family of diagnostics are designed to allow separating -// the detection of a problem from placing that problem in context. For -// example, some code that is validating an object extracted from configuration -// may not have access to the configuration that generated it, but can still -// report problems within that object which the caller can then place in -// context by calling IsConfigBody on the returned diagnostics. -// -// When contextual diagnostics are used, the documentation for a method must -// be very explicit about what context is implied for any diagnostics returned, -// to help ensure the expected result. - -// contextualFromConfig is an interface type implemented by diagnostic types -// that can elaborate themselves when given information about the configuration -// body they are embedded in. -// -// Usually this entails extracting source location information in order to -// populate the "Subject" range. -type contextualFromConfigBody interface { - ElaborateFromConfigBody(hcl.Body) Diagnostic -} - -// InConfigBody returns a copy of the receiver with any config-contextual -// diagnostics elaborated in the context of the given body. -func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics { - if len(d) == 0 { - return nil - } - - ret := make(Diagnostics, len(d)) - for i, srcDiag := range d { - if cd, isCD := srcDiag.(contextualFromConfigBody); isCD { - ret[i] = cd.ElaborateFromConfigBody(body) - } else { - ret[i] = srcDiag - } - } - - return ret -} - -// AttributeValue returns a diagnostic about an attribute value in an implied current -// configuration context. This should be returned only from functions whose -// interface specifies a clear configuration context that this will be -// resolved in. -// -// The given path is relative to the implied configuration context. To describe -// a top-level attribute, it should be a single-element cty.Path with a -// cty.GetAttrStep. It's assumed that the path is returning into a structure -// that would be produced by our conventions in the configschema package; it -// may return unexpected results for structures that can't be represented by -// configschema. -// -// Since mapping attribute paths back onto configuration is an imprecise -// operation (e.g. dynamic block generation may cause the same block to be -// evaluated multiple times) the diagnostic detail should include the attribute -// name and other context required to help the user understand what is being -// referenced in case the identified source range is not unique. -// -// The returned attribute will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. 
-// After context is applied, the source location is the value assigned to the -// named attribute, or the containing body's "missing item range" if no -// value is present. -func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { - return &attributeDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - attrPath: attrPath, - } -} - -// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains -// one. Normally this is not accessed directly, and instead the config body is -// added to the Diagnostic to create a more complete message for the user. In -// some cases however, we may want to know just the name of the attribute that -// generated the Diagnostic message. -// This returns a nil cty.Path if it does not exist in the Diagnostic. -func GetAttribute(d Diagnostic) cty.Path { - if d, ok := d.(*attributeDiagnostic); ok { - return d.attrPath - } - return nil -} - -type attributeDiagnostic struct { - diagnosticBase - attrPath cty.Path - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -// ElaborateFromConfigBody finds the most accurate possible source location -// for a diagnostic's attribute path within the given body. -// -// Backing out from a path back to a source location is not always entirely -// possible because we lose some information in the decoding process, so -// if an exact position cannot be found then the returned diagnostic will -// refer to a position somewhere within the containing body, which is assumed -// to be better than no location at all. -// -// If possible it is generally better to report an error at a layer where -// source location information is still available, for more accuracy. This -// is not always possible due to system architecture, so this serves as a -// "best effort" fallback behavior for such situations. -func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if len(d.attrPath) < 1 { - // Should never happen, but we'll allow it rather than crashing. - return d - } - - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. - return d - } - - ret := *d - - // This function will often end up re-decoding values that were already - // decoded by an earlier step. This is non-ideal but is architecturally - // more convenient than arranging for source location information to be - // propagated to every place in Terraform, and this happens only in the - // presence of errors where performance isn't a concern. - - traverse := d.attrPath[:] - final := d.attrPath[len(d.attrPath)-1] - - // Index should never be the first step - // as indexing of top blocks (such as resources & data sources) - // is handled elsewhere - if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret - } - - // Process index separately - idxStep, hasIdx := final.(cty.IndexStep) - if hasIdx { - final = d.attrPath[len(d.attrPath)-2] - traverse = d.attrPath[:len(d.attrPath)-1] - } - - // If we have more than one step after removing index - // then we'll first try to traverse to a child body - // corresponding to the requested path. - if len(traverse) > 1 { - body = traversePathSteps(traverse, body) - } - - // Default is to indicate a missing item in the deepest body we reached - // while traversing. 
- subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - - // Once we get here, "final" should be a GetAttr step that maps to an - // attribute in our current body. - finalStep, isAttr := final.(cty.GetAttrStep) - if !isAttr { - return &ret - } - - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: finalStep.Name, - Required: true, - }, - }, - }) - if contentDiags.HasErrors() { - return &ret - } - - if attr, ok := content.Attributes[finalStep.Name]; ok { - hclRange := attr.Expr.Range() - if hasIdx { - // Try to be more precise by finding index range - hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) - } - subject = SourceRangeFromHCL(hclRange) - ret.subject = &subject - } - - return &ret -} - -func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { - for i := 0; i < len(traverse); i++ { - step := traverse[i] - - switch tStep := step.(type) { - case cty.GetAttrStep: - - var next cty.PathStep - if i < (len(traverse) - 1) { - next = traverse[i+1] - } - - // Will be indexing into our result here? - var indexType cty.Type - var indexVal cty.Value - if nextIndex, ok := next.(cty.IndexStep); ok { - indexVal = nextIndex.Key - indexType = indexVal.Type() - i++ // skip over the index on subsequent iterations - } - - var blockLabelNames []string - if indexType == cty.String { - // Map traversal means we expect one label for the key. - blockLabelNames = []string{"key"} - } - - // For intermediate steps we expect to be referring to a child - // block, so we'll attempt decoding under that assumption. - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: tStep.Name, - LabelNames: blockLabelNames, - }, - }, - }) - if contentDiags.HasErrors() { - return body - } - filtered := make([]*hcl.Block, 0, len(content.Blocks)) - for _, block := range content.Blocks { - if block.Type == tStep.Name { - filtered = append(filtered, block) - } - } - if len(filtered) == 0 { - // Step doesn't refer to a block - continue - } - - switch indexType { - case cty.NilType: // no index at all - if len(filtered) != 1 { - return body - } - body = filtered[0].Body - case cty.Number: - var idx int - err := gocty.FromCtyValue(indexVal, &idx) - if err != nil || idx >= len(filtered) { - return body - } - body = filtered[idx].Body - case cty.String: - key := indexVal.AsString() - var block *hcl.Block - for _, candidate := range filtered { - if candidate.Labels[0] == key { - block = candidate - break - } - } - if block == nil { - // No block with this key, so we'll just indicate a - // missing item in the containing block. - return body - } - body = block.Body - default: - // Should never happen, because only string and numeric indices - // are supported by cty collections. - return body - } - - default: - // For any other kind of step, we'll just return our current body - // as the subject and accept that this is a little inaccurate. 
- return body - } - } - return body -} - -func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { - switch idxStep.Key.Type() { - case cty.Number: - var idx int - err := gocty.FromCtyValue(idxStep.Key, &idx) - items, diags := hcl.ExprList(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - if err != nil || idx >= len(items) { - return attr.NameRange - } - return items[idx].Range() - case cty.String: - pairs, diags := hcl.ExprMap(attr.Expr) - if diags.HasErrors() { - return attr.Expr.Range() - } - stepKey := idxStep.Key.AsString() - for _, kvPair := range pairs { - key, diags := kvPair.Key.Value(nil) - if diags.HasErrors() { - return attr.Expr.Range() - } - if key.AsString() == stepKey { - startRng := kvPair.Value.StartRange() - return startRng - } - } - return attr.NameRange - } - return attr.Expr.Range() -} - -func (d *attributeDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} - -// WholeContainingBody returns a diagnostic about the body that is an implied -// current configuration context. This should be returned only from -// functions whose interface specifies a clear configuration context that this -// will be resolved in. -// -// The returned attribute will not have source location information until -// context is applied to the containing diagnostics using diags.InConfigBody. -// After context is applied, the source location is currently the missing item -// range of the body. In future, this may change to some other suitable -// part of the containing body. -func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { - return &wholeBodyDiagnostic{ - diagnosticBase: diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - }, - } -} - -type wholeBodyDiagnostic struct { - diagnosticBase - subject *SourceRange // populated only after ElaborateFromConfigBody -} - -func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { - if d.subject != nil { - // Don't modify an already-elaborated diagnostic. - return d - } - - ret := *d - rng := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &rng - return &ret -} - -func (d *wholeBodyDiagnostic) Source() Source { - return Source{ - Subject: d.subject, - } -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go deleted file mode 100644 index a7699cf0..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go +++ /dev/null @@ -1,40 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -type Diagnostic interface { - Severity() Severity - Description() Description - Source() Source - - // FromExpr returns the expression-related context for the diagnostic, if - // available. Returns nil if the diagnostic is not related to an - // expression evaluation. 
- FromExpr() *FromExpr -} - -type Severity rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity - -const ( - Error Severity = 'E' - Warning Severity = 'W' -) - -type Description struct { - Summary string - Detail string -} - -type Source struct { - Subject *SourceRange - Context *SourceRange -} - -type FromExpr struct { - Expression hcl.Expression - EvalContext *hcl.EvalContext -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go deleted file mode 100644 index 50bf9d8e..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go +++ /dev/null @@ -1,31 +0,0 @@ -package tfdiags - -// diagnosticBase can be embedded in other diagnostic structs to get -// default implementations of Severity and Description. This type also -// has default implementations of Source and FromExpr that return no source -// location or expression-related information, so embedders should generally -// override those method to return more useful results where possible. -type diagnosticBase struct { - severity Severity - summary string - detail string -} - -func (d diagnosticBase) Severity() Severity { - return d.severity -} - -func (d diagnosticBase) Description() Description { - return Description{ - Summary: d.summary, - Detail: d.detail, - } -} - -func (d diagnosticBase) Source() Source { - return Source{} -} - -func (d diagnosticBase) FromExpr() *FromExpr { - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go deleted file mode 100644 index 30476ee2..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go +++ /dev/null @@ -1,330 +0,0 @@ -package tfdiags - -import ( - "bytes" - "fmt" - "path/filepath" - "sort" - "strings" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" -) - -// Diagnostics is a list of diagnostics. Diagnostics is intended to be used -// where a Go "error" might normally be used, allowing richer information -// to be conveyed (more context, support for warnings). -// -// A nil Diagnostics is a valid, empty diagnostics list, thus allowing -// heap allocation to be avoided in the common case where there are no -// diagnostics to report at all. -type Diagnostics []Diagnostic - -// Append is the main interface for constructing Diagnostics lists, taking -// an existing list (which may be nil) and appending the new objects to it -// after normalizing them to be implementations of Diagnostic. -// -// The usual pattern for a function that natively "speaks" diagnostics is: -// -// // Create a nil Diagnostics at the start of the function -// var diags diag.Diagnostics -// -// // At later points, build on it if errors / warnings occur: -// foo, err := DoSomethingRisky() -// if err != nil { -// diags = diags.Append(err) -// } -// -// // Eventually return the result and diagnostics in place of error -// return result, diags -// -// Append accepts a variety of different diagnostic-like types, including -// native Go errors and HCL diagnostics. It also knows how to unwrap -// a multierror.Error into separate error diagnostics. It can be passed -// another Diagnostics to concatenate the two lists. If given something -// it cannot handle, this function will panic. 
-func (diags Diagnostics) Append(new ...interface{}) Diagnostics { - for _, item := range new { - if item == nil { - continue - } - - switch ti := item.(type) { - case Diagnostic: - diags = append(diags, ti) - case Diagnostics: - diags = append(diags, ti...) // flatten - case diagnosticsAsError: - diags = diags.Append(ti.Diagnostics) // unwrap - case NonFatalError: - diags = diags.Append(ti.Diagnostics) // unwrap - case hcl.Diagnostics: - for _, hclDiag := range ti { - diags = append(diags, hclDiagnostic{hclDiag}) - } - case *hcl.Diagnostic: - diags = append(diags, hclDiagnostic{ti}) - case *multierror.Error: - for _, err := range ti.Errors { - diags = append(diags, nativeError{err}) - } - case error: - switch { - case errwrap.ContainsType(ti, Diagnostics(nil)): - // If we have an errwrap wrapper with a Diagnostics hiding - // inside then we'll unpick it here to get access to the - // individual diagnostics. - diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) - case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): - // Likewise, if we have HCL diagnostics we'll unpick that too. - diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) - default: - diags = append(diags, nativeError{ti}) - } - default: - panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) - } - } - - // Given the above, we should never end up with a non-nil empty slice - // here, but we'll make sure of that so callers can rely on empty == nil - if len(diags) == 0 { - return nil - } - - return diags -} - -// HasErrors returns true if any of the diagnostics in the list have -// a severity of Error. -func (diags Diagnostics) HasErrors() bool { - for _, diag := range diags { - if diag.Severity() == Error { - return true - } - } - return false -} - -// ForRPC returns a version of the receiver that has been simplified so that -// it is friendly to RPC protocols. -// -// Currently this means that it can be serialized with encoding/gob and -// subsequently re-inflated. It may later grow to include other serialization -// formats. -// -// Note that this loses information about the original objects used to -// construct the diagnostics, so e.g. the errwrap API will not work as -// expected on an error-wrapped Diagnostics that came from ForRPC. -func (diags Diagnostics) ForRPC() Diagnostics { - ret := make(Diagnostics, len(diags)) - for i := range diags { - ret[i] = makeRPCFriendlyDiag(diags[i]) - } - return ret -} - -// Err flattens a diagnostics list into a single Go error, or to nil -// if the diagnostics list does not include any error-level diagnostics. -// -// This can be used to smuggle diagnostics through an API that deals in -// native errors, but unfortunately it will lose naked warnings (warnings -// that aren't accompanied by at least one error) since such APIs have no -// mechanism through which to report these. -// -// return result, diags.Error() -func (diags Diagnostics) Err() error { - if !diags.HasErrors() { - return nil - } - return diagnosticsAsError{diags} -} - -// ErrWithWarnings is similar to Err except that it will also return a non-nil -// error if the receiver contains only warnings. -// -// In the warnings-only situation, the result is guaranteed to be of dynamic -// type NonFatalError, allowing diagnostics-aware callers to type-assert -// and unwrap it, treating it as non-fatal. -// -// This should be used only in contexts where the caller is able to recognize -// and handle NonFatalError. 
For normal callers that expect a lack of errors -// to be signaled by nil, use just Diagnostics.Err. -func (diags Diagnostics) ErrWithWarnings() error { - if len(diags) == 0 { - return nil - } - if diags.HasErrors() { - return diags.Err() - } - return NonFatalError{diags} -} - -// NonFatalErr is similar to Err except that it always returns either nil -// (if there are no diagnostics at all) or NonFatalError. -// -// This allows diagnostics to be returned over an error return channel while -// being explicit that the diagnostics should not halt processing. -// -// This should be used only in contexts where the caller is able to recognize -// and handle NonFatalError. For normal callers that expect a lack of errors -// to be signaled by nil, use just Diagnostics.Err. -func (diags Diagnostics) NonFatalErr() error { - if len(diags) == 0 { - return nil - } - return NonFatalError{diags} -} - -// Sort applies an ordering to the diagnostics in the receiver in-place. -// -// The ordering is: warnings before errors, sourceless before sourced, -// short source paths before long source paths, and then ordering by -// position within each file. -// -// Diagnostics that do not differ by any of these sortable characteristics -// will remain in the same relative order after this method returns. -func (diags Diagnostics) Sort() { - sort.Stable(sortDiagnostics(diags)) -} - -type diagnosticsAsError struct { - Diagnostics -} - -func (dae diagnosticsAsError) Error() string { - diags := dae.Diagnostics - switch { - case len(diags) == 0: - // should never happen, since we don't create this wrapper if - // there are no diagnostics in the list. - return "no errors" - case len(diags) == 1: - desc := diags[0].Description() - if desc.Detail == "" { - return desc.Summary - } - return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) - default: - var ret bytes.Buffer - fmt.Fprintf(&ret, "%d problems:\n", len(diags)) - for _, diag := range dae.Diagnostics { - desc := diag.Description() - if desc.Detail == "" { - fmt.Fprintf(&ret, "\n- %s", desc.Summary) - } else { - fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) - } - } - return ret.String() - } -} - -// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped -// diagnostics object can be picked apart by errwrap-aware code. -func (dae diagnosticsAsError) WrappedErrors() []error { - var errs []error - for _, diag := range dae.Diagnostics { - if wrapper, isErr := diag.(nativeError); isErr { - errs = append(errs, wrapper.err) - } - } - return errs -} - -// NonFatalError is a special error type, returned by -// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, -// that indicates that the wrapped diagnostics should be treated as non-fatal. -// Callers can conditionally type-assert an error to this type in order to -// detect the non-fatal scenario and handle it in a different way. -type NonFatalError struct { - Diagnostics -} - -func (woe NonFatalError) Error() string { - diags := woe.Diagnostics - switch { - case len(diags) == 0: - // should never happen, since we don't create this wrapper if - // there are no diagnostics in the list. 
- return "no errors or warnings" - case len(diags) == 1: - desc := diags[0].Description() - if desc.Detail == "" { - return desc.Summary - } - return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) - default: - var ret bytes.Buffer - if diags.HasErrors() { - fmt.Fprintf(&ret, "%d problems:\n", len(diags)) - } else { - fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) - } - for _, diag := range woe.Diagnostics { - desc := diag.Description() - if desc.Detail == "" { - fmt.Fprintf(&ret, "\n- %s", desc.Summary) - } else { - fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) - } - } - return ret.String() - } -} - -// sortDiagnostics is an implementation of sort.Interface -type sortDiagnostics []Diagnostic - -var _ sort.Interface = sortDiagnostics(nil) - -func (sd sortDiagnostics) Len() int { - return len(sd) -} - -func (sd sortDiagnostics) Less(i, j int) bool { - iD, jD := sd[i], sd[j] - iSev, jSev := iD.Severity(), jD.Severity() - iSrc, jSrc := iD.Source(), jD.Source() - - switch { - - case iSev != jSev: - return iSev == Warning - - case (iSrc.Subject == nil) != (jSrc.Subject == nil): - return iSrc.Subject == nil - - case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: - iSubj := iSrc.Subject - jSubj := jSrc.Subject - switch { - case iSubj.Filename != jSubj.Filename: - // Path with fewer segments goes first if they are different lengths - sep := string(filepath.Separator) - iCount := strings.Count(iSubj.Filename, sep) - jCount := strings.Count(jSubj.Filename, sep) - if iCount != jCount { - return iCount < jCount - } - return iSubj.Filename < jSubj.Filename - case iSubj.Start.Byte != jSubj.Start.Byte: - return iSubj.Start.Byte < jSubj.Start.Byte - case iSubj.End.Byte != jSubj.End.Byte: - return iSubj.End.Byte < jSubj.End.Byte - } - fallthrough - - default: - // The remaining properties do not have a defined ordering, so - // we'll leave it unspecified. Since we use sort.Stable in - // the caller of this, the ordering of remaining items will - // be preserved. 
- return false - } -} - -func (sd sortDiagnostics) Swap(i, j int) { - sd[i], sd[j] = sd[j], sd[i] -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go deleted file mode 100644 index 13f7a714..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/error.go +++ /dev/null @@ -1,28 +0,0 @@ -package tfdiags - -// nativeError is a Diagnostic implementation that wraps a normal Go error -type nativeError struct { - err error -} - -var _ Diagnostic = nativeError{} - -func (e nativeError) Severity() Severity { - return Error -} - -func (e nativeError) Description() Description { - return Description{ - Summary: FormatError(e.err), - } -} - -func (e nativeError) Source() Source { - // No source information available for a native error - return Source{} -} - -func (e nativeError) FromExpr() *FromExpr { - // Native errors are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go deleted file mode 100644 index 8c781611..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go +++ /dev/null @@ -1,87 +0,0 @@ -package tfdiags - -import ( - "github.com/hashicorp/hcl/v2" -) - -// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic -type hclDiagnostic struct { - diag *hcl.Diagnostic -} - -var _ Diagnostic = hclDiagnostic{} - -func (d hclDiagnostic) Severity() Severity { - switch d.diag.Severity { - case hcl.DiagWarning: - return Warning - default: - return Error - } -} - -func (d hclDiagnostic) Description() Description { - return Description{ - Summary: d.diag.Summary, - Detail: d.diag.Detail, - } -} - -func (d hclDiagnostic) Source() Source { - var ret Source - if d.diag.Subject != nil { - rng := SourceRangeFromHCL(*d.diag.Subject) - ret.Subject = &rng - } - if d.diag.Context != nil { - rng := SourceRangeFromHCL(*d.diag.Context) - ret.Context = &rng - } - return ret -} - -func (d hclDiagnostic) FromExpr() *FromExpr { - if d.diag.Expression == nil || d.diag.EvalContext == nil { - return nil - } - return &FromExpr{ - Expression: d.diag.Expression, - EvalContext: d.diag.EvalContext, - } -} - -// SourceRangeFromHCL constructs a SourceRange from the corresponding range -// type within the HCL package. -func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { - return SourceRange{ - Filename: hclRange.Filename, - Start: SourcePos{ - Line: hclRange.Start.Line, - Column: hclRange.Start.Column, - Byte: hclRange.Start.Byte, - }, - End: SourcePos{ - Line: hclRange.End.Line, - Column: hclRange.End.Column, - Byte: hclRange.End.Byte, - }, - } -} - -// ToHCL constructs a HCL Range from the receiving SourceRange. This is the -// opposite of SourceRangeFromHCL. 
-func (r SourceRange) ToHCL() hcl.Range { - return hcl.Range{ - Filename: r.Filename, - Start: hcl.Pos{ - Line: r.Start.Line, - Column: r.Start.Column, - Byte: r.Start.Byte, - }, - End: hcl.Pos{ - Line: r.End.Line, - Column: r.End.Column, - Byte: r.End.Byte, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go deleted file mode 100644 index 485063b0..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go +++ /dev/null @@ -1,59 +0,0 @@ -package tfdiags - -import ( - "encoding/gob" -) - -type rpcFriendlyDiag struct { - Severity_ Severity - Summary_ string - Detail_ string - Subject_ *SourceRange - Context_ *SourceRange -} - -// rpcFriendlyDiag transforms a given diagnostic so that is more friendly to -// RPC. -// -// In particular, it currently returns an object that can be serialized and -// later re-inflated using gob. This definition may grow to include other -// serializations later. -func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { - desc := diag.Description() - source := diag.Source() - return &rpcFriendlyDiag{ - Severity_: diag.Severity(), - Summary_: desc.Summary, - Detail_: desc.Detail, - Subject_: source.Subject, - Context_: source.Context, - } -} - -func (d *rpcFriendlyDiag) Severity() Severity { - return d.Severity_ -} - -func (d *rpcFriendlyDiag) Description() Description { - return Description{ - Summary: d.Summary_, - Detail: d.Detail_, - } -} - -func (d *rpcFriendlyDiag) Source() Source { - return Source{ - Subject: d.Subject_, - Context: d.Context_, - } -} - -func (d rpcFriendlyDiag) FromExpr() *FromExpr { - // RPC-friendly diagnostics cannot preserve expression information because - // expressions themselves are not RPC-friendly. - return nil -} - -func init() { - gob.Register((*rpcFriendlyDiag)(nil)) -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go deleted file mode 100644 index b0f1ecd4..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go +++ /dev/null @@ -1,30 +0,0 @@ -package tfdiags - -type simpleWarning string - -var _ Diagnostic = simpleWarning("") - -// SimpleWarning constructs a simple (summary-only) warning diagnostic. -func SimpleWarning(msg string) Diagnostic { - return simpleWarning(msg) -} - -func (e simpleWarning) Severity() Severity { - return Warning -} - -func (e simpleWarning) Description() Description { - return Description{ - Summary: string(e), - } -} - -func (e simpleWarning) Source() Source { - // No source information available for a simple warning - return Source{} -} - -func (e simpleWarning) FromExpr() *FromExpr { - // Simple warnings are not expression-related - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go deleted file mode 100644 index 3031168d..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go +++ /dev/null @@ -1,35 +0,0 @@ -package tfdiags - -import ( - "fmt" - "os" - "path/filepath" -) - -type SourceRange struct { - Filename string - Start, End SourcePos -} - -type SourcePos struct { - Line, Column, Byte int -} - -// StartString returns a string representation of the start of the range, -// including the filename and the line and column numbers. 
-func (r SourceRange) StartString() string { - filename := r.Filename - - // We'll try to relative-ize our filename here so it's less verbose - // in the common case of being in the current working directory. If not, - // we'll just show the full path. - wd, err := os.Getwd() - if err == nil { - relFn, err := filepath.Rel(wd, filename) - if err == nil { - filename = relFn - } - } - - return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) -} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go deleted file mode 100644 index eaa27373..00000000 --- a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go +++ /dev/null @@ -1,13 +0,0 @@ -package tfdiags - -// Sourceless creates and returns a diagnostic with no source location -// information. This is generally used for operational-type errors that are -// caused by or relate to the environment where Terraform is running rather -// than to the provided configuration. -func Sourceless(severity Severity, summary, detail string) Diagnostic { - return diagnosticBase{ - severity: severity, - summary: summary, - detail: detail, - } -} diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go deleted file mode 100644 index c8c37d32..00000000 --- a/vendor/github.com/hashicorp/terraform/version/version.go +++ /dev/null @@ -1,40 +0,0 @@ -// The version package provides a location to set the release versions for all -// packages to consume, without creating import cycles. -// -// This package should not import any other terraform packages. -package version - -import ( - "fmt" - - version "github.com/hashicorp/go-version" -) - -// The main version number that is being run at the moment. -var Version = "0.12.26" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var Prerelease = "" - -// SemVer is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVer *version.Version - -func init() { - SemVer = version.Must(version.NewVersion(Version)) -} - -// Header is the header name used to send the current terraform version -// in http requests. -const Header = "Terraform-Version" - -// String returns the complete version string, including prerelease -func String() string { - if Prerelease != "" { - return fmt.Sprintf("%s-%s", Version, Prerelease) - } - return Version -} diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod new file mode 100644 index 00000000..672a0e58 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/yamux diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go index 7abc7c74..18a078c8 100644 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -3,6 +3,7 @@ package yamux import ( "fmt" "io" + "log" "os" "time" ) @@ -30,8 +31,13 @@ type Config struct { // window size that we allow for a stream. MaxStreamWindowSize uint32 - // LogOutput is used to control the log destination + // LogOutput is used to control the log destination. Either Logger or + // LogOutput can be set, not both. 
LogOutput io.Writer + + // Logger is used to pass in the logger to be used. Either Logger or + // LogOutput can be set, not both. + Logger *log.Logger } // DefaultConfig is used to return a default configuration @@ -57,6 +63,11 @@ func VerifyConfig(config *Config) error { if config.MaxStreamWindowSize < initialStreamWindow { return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) } + if config.LogOutput != nil && config.Logger != nil { + return fmt.Errorf("both Logger and LogOutput may not be set, select one") + } else if config.LogOutput == nil && config.Logger == nil { + return fmt.Errorf("one of Logger or LogOutput must be set, select one") + } return nil } diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index 32ba02e0..a80ddec3 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -86,9 +86,14 @@ type sendReady struct { // newSession is used to construct a new session func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + s := &Session{ config: config, - logger: log.New(config.LogOutput, "", log.LstdFlags), + logger: logger, conn: conn, bufRead: bufio.NewReader(conn), pings: make(map[uint32]chan struct{}), diff --git a/vendor/github.com/jstemmer/go-junit-report/.gitignore b/vendor/github.com/jstemmer/go-junit-report/.gitignore new file mode 100644 index 00000000..720bda60 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/.gitignore @@ -0,0 +1 @@ +go-junit-report diff --git a/vendor/github.com/jstemmer/go-junit-report/.travis.yml b/vendor/github.com/jstemmer/go-junit-report/.travis.yml new file mode 100644 index 00000000..d0dff3ef --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - tip + - "1.13.x" + - "1.12.x" + - "1.11.x" + - "1.10.x" + - "1.9.x" + - "1.8.x" + - "1.7.x" + - "1.6.x" + - "1.5.x" + - "1.4.x" + - "1.3.x" + - "1.2.x" diff --git a/vendor/github.com/jstemmer/go-junit-report/LICENSE b/vendor/github.com/jstemmer/go-junit-report/LICENSE new file mode 100644 index 00000000..f346564c --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012 Joel Stemmer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/jstemmer/go-junit-report/README.md b/vendor/github.com/jstemmer/go-junit-report/README.md new file mode 100644 index 00000000..5b5f608b --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/README.md @@ -0,0 +1,49 @@ +# go-junit-report + +Converts `go test` output to an xml report, suitable for applications that +expect junit xml reports (e.g. [Jenkins](http://jenkins-ci.org)). + +[![Build Status][travis-badge]][travis-link] +[![Report Card][report-badge]][report-link] + +## Installation + +Go version 1.2 or higher is required. Install or update using the `go get` +command: + +```bash +go get -u github.com/jstemmer/go-junit-report +``` + +## Usage + +go-junit-report reads the `go test` verbose output from standard in and writes +junit compatible XML to standard out. + +```bash +go test -v 2>&1 | go-junit-report > report.xml +``` + +Note that it also can parse benchmark output with `-bench` flag: +```bash +go test -v -bench . -count 5 2>&1 | go-junit-report > report.xml +``` + +## Contribution + +Create an Issue and discuss the fix or feature, then fork the package. +Clone to github.com/jstemmer/go-junit-report. This is necessary because go import uses this path. +Fix or implement feature. Test and then commit change. +Specify #Issue and describe change in the commit message. +Create Pull Request. It can be merged by owner or administrator then. + +### Run Tests + +```bash +go test +``` + +[travis-badge]: https://travis-ci.org/jstemmer/go-junit-report.svg +[travis-link]: https://travis-ci.org/jstemmer/go-junit-report +[report-badge]: https://goreportcard.com/badge/github.com/jstemmer/go-junit-report +[report-link]: https://goreportcard.com/report/github.com/jstemmer/go-junit-report diff --git a/vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go b/vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go new file mode 100644 index 00000000..6e1a0f31 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go @@ -0,0 +1,182 @@ +package formatter + +import ( + "bufio" + "encoding/xml" + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/jstemmer/go-junit-report/parser" +) + +// JUnitTestSuites is a collection of JUnit test suites. +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + Suites []JUnitTestSuite `xml:"testsuite"` +} + +// JUnitTestSuite is a single JUnit test suite which may contain many +// testcases. +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Time string `xml:"time,attr"` + Name string `xml:"name,attr"` + Properties []JUnitProperty `xml:"properties>property,omitempty"` + TestCases []JUnitTestCase `xml:"testcase"` +} + +// JUnitTestCase is a single test case with its result. +type JUnitTestCase struct { + XMLName xml.Name `xml:"testcase"` + Classname string `xml:"classname,attr"` + Name string `xml:"name,attr"` + Time string `xml:"time,attr"` + SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"` + Failure *JUnitFailure `xml:"failure,omitempty"` +} + +// JUnitSkipMessage contains the reason why a testcase was skipped. +type JUnitSkipMessage struct { + Message string `xml:"message,attr"` +} + +// JUnitProperty represents a key/value pair used to define properties. +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +// JUnitFailure contains data related to a failed test. 
+type JUnitFailure struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr"` + Contents string `xml:",chardata"` +} + +// JUnitReportXML writes a JUnit xml representation of the given report to w +// in the format described at http://windyroad.org/dl/Open%20Source/JUnit.xsd +func JUnitReportXML(report *parser.Report, noXMLHeader bool, goVersion string, w io.Writer) error { + suites := JUnitTestSuites{} + + // convert Report to JUnit test suites + for _, pkg := range report.Packages { + pkg.Benchmarks = mergeBenchmarks(pkg.Benchmarks) + ts := JUnitTestSuite{ + Tests: len(pkg.Tests) + len(pkg.Benchmarks), + Failures: 0, + Time: formatTime(pkg.Duration), + Name: pkg.Name, + Properties: []JUnitProperty{}, + TestCases: []JUnitTestCase{}, + } + + classname := pkg.Name + if idx := strings.LastIndex(classname, "/"); idx > -1 && idx < len(pkg.Name) { + classname = pkg.Name[idx+1:] + } + + // properties + if goVersion == "" { + // if goVersion was not specified as a flag, fall back to version reported by runtime + goVersion = runtime.Version() + } + ts.Properties = append(ts.Properties, JUnitProperty{"go.version", goVersion}) + if pkg.CoveragePct != "" { + ts.Properties = append(ts.Properties, JUnitProperty{"coverage.statements.pct", pkg.CoveragePct}) + } + + // individual test cases + for _, test := range pkg.Tests { + testCase := JUnitTestCase{ + Classname: classname, + Name: test.Name, + Time: formatTime(test.Duration), + Failure: nil, + } + + if test.Result == parser.FAIL { + ts.Failures++ + testCase.Failure = &JUnitFailure{ + Message: "Failed", + Type: "", + Contents: strings.Join(test.Output, "\n"), + } + } + + if test.Result == parser.SKIP { + testCase.SkipMessage = &JUnitSkipMessage{strings.Join(test.Output, "\n")} + } + + ts.TestCases = append(ts.TestCases, testCase) + } + + // individual benchmarks + for _, benchmark := range pkg.Benchmarks { + benchmarkCase := JUnitTestCase{ + Classname: classname, + Name: benchmark.Name, + Time: formatBenchmarkTime(benchmark.Duration), + } + + ts.TestCases = append(ts.TestCases, benchmarkCase) + } + + suites.Suites = append(suites.Suites, ts) + } + + // to xml + bytes, err := xml.MarshalIndent(suites, "", "\t") + if err != nil { + return err + } + + writer := bufio.NewWriter(w) + + if !noXMLHeader { + writer.WriteString(xml.Header) + } + + writer.Write(bytes) + writer.WriteByte('\n') + writer.Flush() + + return nil +} + +func mergeBenchmarks(benchmarks []*parser.Benchmark) []*parser.Benchmark { + var merged []*parser.Benchmark + benchmap := make(map[string][]*parser.Benchmark) + for _, bm := range benchmarks { + if _, ok := benchmap[bm.Name]; !ok { + merged = append(merged, &parser.Benchmark{Name: bm.Name}) + } + benchmap[bm.Name] = append(benchmap[bm.Name], bm) + } + + for _, bm := range merged { + for _, b := range benchmap[bm.Name] { + bm.Allocs += b.Allocs + bm.Bytes += b.Bytes + bm.Duration += b.Duration + } + n := len(benchmap[bm.Name]) + bm.Allocs /= n + bm.Bytes /= n + bm.Duration /= time.Duration(n) + } + + return merged +} + +func formatTime(d time.Duration) string { + return fmt.Sprintf("%.3f", d.Seconds()) +} + +func formatBenchmarkTime(d time.Duration) string { + return fmt.Sprintf("%.9f", d.Seconds()) +} diff --git a/vendor/github.com/jstemmer/go-junit-report/go-junit-report.go b/vendor/github.com/jstemmer/go-junit-report/go-junit-report.go new file mode 100644 index 00000000..1332f3b6 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/go-junit-report.go @@ -0,0 +1,45 @@ +package main + +import ( + "flag" + 
"fmt" + "os" + + "github.com/jstemmer/go-junit-report/formatter" + "github.com/jstemmer/go-junit-report/parser" +) + +var ( + noXMLHeader = flag.Bool("no-xml-header", false, "do not print xml header") + packageName = flag.String("package-name", "", "specify a package name (compiled test have no package name in output)") + goVersionFlag = flag.String("go-version", "", "specify the value to use for the go.version property in the generated XML") + setExitCode = flag.Bool("set-exit-code", false, "set exit code to 1 if tests failed") +) + +func main() { + flag.Parse() + + if flag.NArg() != 0 { + fmt.Fprintf(os.Stderr, "%s does not accept positional arguments\n", os.Args[0]) + flag.Usage() + os.Exit(1) + } + + // Read input + report, err := parser.Parse(os.Stdin, *packageName) + if err != nil { + fmt.Printf("Error reading input: %s\n", err) + os.Exit(1) + } + + // Write xml + err = formatter.JUnitReportXML(report, *noXMLHeader, *goVersionFlag, os.Stdout) + if err != nil { + fmt.Printf("Error writing XML: %s\n", err) + os.Exit(1) + } + + if *setExitCode && report.Failures() > 0 { + os.Exit(1) + } +} diff --git a/vendor/github.com/jstemmer/go-junit-report/go.mod b/vendor/github.com/jstemmer/go-junit-report/go.mod new file mode 100644 index 00000000..de52369a --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/go.mod @@ -0,0 +1,3 @@ +module github.com/jstemmer/go-junit-report + +go 1.2 diff --git a/vendor/github.com/jstemmer/go-junit-report/parser/parser.go b/vendor/github.com/jstemmer/go-junit-report/parser/parser.go new file mode 100644 index 00000000..e268128a --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/parser/parser.go @@ -0,0 +1,319 @@ +package parser + +import ( + "bufio" + "io" + "regexp" + "strconv" + "strings" + "time" +) + +// Result represents a test result. +type Result int + +// Test result constants +const ( + PASS Result = iota + FAIL + SKIP +) + +// Report is a collection of package tests. +type Report struct { + Packages []Package +} + +// Package contains the test results of a single package. +type Package struct { + Name string + Duration time.Duration + Tests []*Test + Benchmarks []*Benchmark + CoveragePct string + + // Time is deprecated, use Duration instead. + Time int // in milliseconds +} + +// Test contains the results of a single test. +type Test struct { + Name string + Duration time.Duration + Result Result + Output []string + + SubtestIndent string + + // Time is deprecated, use Duration instead. + Time int // in milliseconds +} + +// Benchmark contains the results of a single benchmark. +type Benchmark struct { + Name string + Duration time.Duration + // number of B/op + Bytes int + // number of allocs/op + Allocs int +} + +var ( + regexStatus = regexp.MustCompile(`--- (PASS|FAIL|SKIP): (.+) \((\d+\.\d+)(?: seconds|s)\)`) + regexIndent = regexp.MustCompile(`^([ \t]+)---`) + regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+\.\d+)%\s+of\s+statements(?:\sin\s.+)?$`) + regexResult = regexp.MustCompile(`^(ok|FAIL)\s+([^ ]+)\s+(?:(\d+\.\d+)s|\(cached\)|(\[\w+ failed]))(?:\s+coverage:\s+(\d+\.\d+)%\sof\sstatements(?:\sin\s.+)?)?$`) + // regexBenchmark captures 3-5 groups: benchmark name, number of times ran, ns/op (with or without decimal), B/op (optional), and allocs/op (optional). 
+ regexBenchmark = regexp.MustCompile(`^(Benchmark[^ -]+)(?:-\d+\s+|\s+)(\d+)\s+(\d+|\d+\.\d+)\sns/op(?:\s+(\d+)\sB/op)?(?:\s+(\d+)\sallocs/op)?`) + regexOutput = regexp.MustCompile(`( )*\t(.*)`) + regexSummary = regexp.MustCompile(`^(PASS|FAIL|SKIP)$`) + regexPackageWithTest = regexp.MustCompile(`^# ([^\[\]]+) \[[^\]]+\]$`) +) + +// Parse parses go test output from reader r and returns a report with the +// results. An optional pkgName can be given, which is used in case a package +// result line is missing. +func Parse(r io.Reader, pkgName string) (*Report, error) { + reader := bufio.NewReader(r) + + report := &Report{make([]Package, 0)} + + // keep track of tests we find + var tests []*Test + + // keep track of benchmarks we find + var benchmarks []*Benchmark + + // sum of tests' time, use this if current test has no result line (when it is compiled test) + var testsTime time.Duration + + // current test + var cur string + + // coverage percentage report for current package + var coveragePct string + + // stores mapping between package name and output of build failures + var packageCaptures = map[string][]string{} + + // the name of the package which it's build failure output is being captured + var capturedPackage string + + // capture any non-test output + var buffers = map[string][]string{} + + // parse lines + for { + l, _, err := reader.ReadLine() + if err != nil && err == io.EOF { + break + } else if err != nil { + return nil, err + } + + line := string(l) + + if strings.HasPrefix(line, "=== RUN ") { + // new test + cur = strings.TrimSpace(line[8:]) + tests = append(tests, &Test{ + Name: cur, + Result: FAIL, + Output: make([]string, 0), + }) + + // clear the current build package, so output lines won't be added to that build + capturedPackage = "" + } else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) == 6 { + bytes, _ := strconv.Atoi(matches[4]) + allocs, _ := strconv.Atoi(matches[5]) + + benchmarks = append(benchmarks, &Benchmark{ + Name: matches[1], + Duration: parseNanoseconds(matches[3]), + Bytes: bytes, + Allocs: allocs, + }) + } else if strings.HasPrefix(line, "=== PAUSE ") { + continue + } else if strings.HasPrefix(line, "=== CONT ") { + cur = strings.TrimSpace(line[8:]) + continue + } else if matches := regexResult.FindStringSubmatch(line); len(matches) == 6 { + if matches[5] != "" { + coveragePct = matches[5] + } + if strings.HasSuffix(matches[4], "failed]") { + // the build of the package failed, inject a dummy test into the package + // which indicate about the failure and contain the failure description. + tests = append(tests, &Test{ + Name: matches[4], + Result: FAIL, + Output: packageCaptures[matches[2]], + }) + } else if matches[1] == "FAIL" && !containsFailures(tests) && len(buffers[cur]) > 0 { + // This package didn't have any failing tests, but still it + // failed with some output. Create a dummy test with the + // output. 
+ tests = append(tests, &Test{ + Name: "Failure", + Result: FAIL, + Output: buffers[cur], + }) + buffers[cur] = buffers[cur][0:0] + } + + // all tests in this package are finished + report.Packages = append(report.Packages, Package{ + Name: matches[2], + Duration: parseSeconds(matches[3]), + Tests: tests, + Benchmarks: benchmarks, + CoveragePct: coveragePct, + + Time: int(parseSeconds(matches[3]) / time.Millisecond), // deprecated + }) + + buffers[cur] = buffers[cur][0:0] + tests = make([]*Test, 0) + benchmarks = make([]*Benchmark, 0) + coveragePct = "" + cur = "" + testsTime = 0 + } else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 { + cur = matches[2] + test := findTest(tests, cur) + if test == nil { + continue + } + + // test status + if matches[1] == "PASS" { + test.Result = PASS + } else if matches[1] == "SKIP" { + test.Result = SKIP + } else { + test.Result = FAIL + } + + if matches := regexIndent.FindStringSubmatch(line); len(matches) == 2 { + test.SubtestIndent = matches[1] + } + + test.Output = buffers[cur] + + test.Name = matches[2] + test.Duration = parseSeconds(matches[3]) + testsTime += test.Duration + + test.Time = int(test.Duration / time.Millisecond) // deprecated + } else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 { + coveragePct = matches[1] + } else if matches := regexOutput.FindStringSubmatch(line); capturedPackage == "" && len(matches) == 3 { + // Sub-tests start with one or more series of 4-space indents, followed by a hard tab, + // followed by the test output + // Top-level tests start with a hard tab. + test := findTest(tests, cur) + if test == nil { + continue + } + test.Output = append(test.Output, matches[2]) + } else if strings.HasPrefix(line, "# ") { + // indicates a capture of build output of a package. set the current build package. + packageWithTestBinary := regexPackageWithTest.FindStringSubmatch(line) + if packageWithTestBinary != nil { + // Sometimes, the text after "# " shows the name of the test binary + // (".test") in addition to the package + // e.g.: "# package/name [package/name.test]" + capturedPackage = packageWithTestBinary[1] + } else { + capturedPackage = line[2:] + } + } else if capturedPackage != "" { + // current line is build failure capture for the current built package + packageCaptures[capturedPackage] = append(packageCaptures[capturedPackage], line) + } else if regexSummary.MatchString(line) { + // unset current test name so any additional output after the + // summary is captured separately. + cur = "" + } else { + // buffer anything else that we didn't recognize + buffers[cur] = append(buffers[cur], line) + + // if we have a current test, also append to its output + test := findTest(tests, cur) + if test != nil { + if strings.HasPrefix(line, test.SubtestIndent+" ") { + test.Output = append(test.Output, strings.TrimPrefix(line, test.SubtestIndent+" ")) + } + } + } + } + + if len(tests) > 0 { + // no result line found + report.Packages = append(report.Packages, Package{ + Name: pkgName, + Duration: testsTime, + Time: int(testsTime / time.Millisecond), + Tests: tests, + Benchmarks: benchmarks, + CoveragePct: coveragePct, + }) + } + + return report, nil +} + +func parseSeconds(t string) time.Duration { + if t == "" { + return time.Duration(0) + } + // ignore error + d, _ := time.ParseDuration(t + "s") + return d +} + +func parseNanoseconds(t string) time.Duration { + // note: if input < 1 ns precision, result will be 0s. 
+ if t == "" { + return time.Duration(0) + } + // ignore error + d, _ := time.ParseDuration(t + "ns") + return d +} + +func findTest(tests []*Test, name string) *Test { + for i := len(tests) - 1; i >= 0; i-- { + if tests[i].Name == name { + return tests[i] + } + } + return nil +} + +func containsFailures(tests []*Test) bool { + for _, test := range tests { + if test.Result == FAIL { + return true + } + } + return false +} + +// Failures counts the number of failed tests in this report +func (r *Report) Failures() int { + count := 0 + + for _, p := range r.Packages { + for _, t := range p.Tests { + if t.Result == FAIL { + count++ + } + } + } + + return count +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 1f28d773..0b0aef83 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -9,7 +9,7 @@ import ( _ "github.com/mattn/go-isatty" ) -// NewColorable return new instance of Writer which handle escape sequence. +// NewColorable returns new instance of Writer which handles escape sequence. func NewColorable(file *os.File) io.Writer { if file == nil { panic("nil passed instead of *os.File to NewColorable()") @@ -18,12 +18,12 @@ func NewColorable(file *os.File) io.Writer { return file } -// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. func NewColorableStdout() io.Writer { return os.Stdout } -// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. func NewColorableStderr() io.Writer { return os.Stderr } diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 887f203d..3fb771dc 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -10,7 +10,7 @@ import ( _ "github.com/mattn/go-isatty" ) -// NewColorable return new instance of Writer which handle escape sequence. +// NewColorable returns new instance of Writer which handles escape sequence. func NewColorable(file *os.File) io.Writer { if file == nil { panic("nil passed instead of *os.File to NewColorable()") @@ -19,12 +19,12 @@ func NewColorable(file *os.File) io.Writer { return file } -// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. func NewColorableStdout() io.Writer { return os.Stdout } -// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
func NewColorableStderr() io.Writer { return os.Stderr } diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 404e10ca..1bd628f2 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -81,7 +81,7 @@ var ( procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) -// Writer provide colorable Writer to the console +// Writer provides colorable Writer to the console type Writer struct { out io.Writer handle syscall.Handle @@ -91,7 +91,7 @@ type Writer struct { rest bytes.Buffer } -// NewColorable return new instance of Writer which handle escape sequence from File. +// NewColorable returns new instance of Writer which handles escape sequence from File. func NewColorable(file *os.File) io.Writer { if file == nil { panic("nil passed instead of *os.File to NewColorable()") @@ -106,12 +106,12 @@ func NewColorable(file *os.File) io.Writer { return file } -// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. func NewColorableStdout() io.Writer { return NewColorable(os.Stdout) } -// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. func NewColorableStderr() io.Writer { return NewColorable(os.Stderr) } @@ -414,7 +414,15 @@ func doTitleSequence(er *bytes.Reader) error { return nil } -// Write write data on console +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console func (w *Writer) Write(data []byte) (n int, err error) { var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) @@ -500,7 +508,7 @@ loop: switch m { case 'A': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } @@ -508,7 +516,7 @@ loop: csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } @@ -516,7 +524,7 @@ loop: csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } @@ -524,7 +532,7 @@ loop: csbi.cursorPosition.x += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } @@ -557,6 +565,9 @@ loop: if err != nil { continue } + if n < 1 { + n = 1 + } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = short(n - 1) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) @@ -635,6 +646,20 @@ loop: } procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), 
uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) attr := csbi.attributes diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod index 9d9f4248..ef3ca9d4 100644 --- a/vendor/github.com/mattn/go-colorable/go.mod +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -1,3 +1,3 @@ module github.com/mattn/go-colorable -require github.com/mattn/go-isatty v0.0.5 +require github.com/mattn/go-isatty v0.0.8 diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 9721e16f..95f2c6be 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -5,17 +5,17 @@ import ( "io" ) -// NonColorable hold writer but remove escape sequence. +// NonColorable holds writer but removes escape sequence. type NonColorable struct { out io.Writer } -// NewNonColorable return new instance of Writer which remove escape sequence from Writer. +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. 
func NewNonColorable(w io.Writer) io.Writer { return &NonColorable{out: w} } -// Write write data on console +// Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) var bw [1]byte diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod index f310320c..a8ddf404 100644 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -1,3 +1,5 @@ module github.com/mattn/go-isatty -require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 +require golang.org/x/sys v0.0.0-20191008105621-543471e840be + +go 1.14 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum index 426c8973..c141fc53 100644 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -1,2 +1,4 @@ -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 00000000..d3567cb5 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go deleted file mode 100644 index e004038e..00000000 --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux -// +build !appengine - -package isatty - -import "golang.org/x/sys/unix" - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - return err == nil -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index f02849c5..ff714a37 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,4 +1,4 @@ -// +build appengine js +// +build appengine js nacl package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000..bc0a7092 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(fd) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000..453b025d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go index af51cbca..1fa86915 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -4,6 +4,7 @@ package isatty import ( + "errors" "strings" "syscall" "unicode/utf16" @@ -11,15 +12,18 @@ import ( ) const ( - fileNameInfo uintptr = 2 - fileTypePipe = 3 + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 ) var ( kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") procGetConsoleMode = kernel32.NewProc("GetConsoleMode") procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") ) func init() { @@ -45,7 +49,10 @@ func isCygwinPipeName(name string) bool { return false } - if token[0] != `\msys` && token[0] != `\cygwin` { + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { return false } @@ -68,11 +75,35 @@ func isCygwinPipeName(name string) bool { return true } +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + // IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 // terminal. func IsCygwinTerminal(fd uintptr) bool { if procGetFileInformationByHandleEx == nil { - return false + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) } // Cygwin/msys's pty is a pipe. diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml deleted file mode 100644 index b8599b3a..00000000 --- a/vendor/github.com/mitchellh/cli/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -sudo: false - -language: go - -go: - - "1.8" - - "1.9" - - "1.10" - -branches: - only: - - master - -script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile deleted file mode 100644 index 4874b008..00000000 --- a/vendor/github.com/mitchellh/cli/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code -test: - go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) - -# testrace runs the race checker -testrace: - go list $(TEST) | xargs -n1 go test -race $(TESTARGS) - -# updatedeps installs all the dependencies to run and build -updatedeps: - go list ./... 
\ - | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ - | grep -v github.com/mitchellh/cli \ - | xargs go get -f -u -v - -.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md deleted file mode 100644 index 8f02cdd0..00000000 --- a/vendor/github.com/mitchellh/cli/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) - -cli is a library for implementing powerful command-line interfaces in Go. -cli is the library that powers the CLI for -[Packer](https://github.com/mitchellh/packer), -[Serf](https://github.com/hashicorp/serf), -[Consul](https://github.com/hashicorp/consul), -[Vault](https://github.com/hashicorp/vault), -[Terraform](https://github.com/hashicorp/terraform), and -[Nomad](https://github.com/hashicorp/nomad). - -## Features - -* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. - -* Support for nested subcommands such as `cli foo bar`. - -* Optional support for default subcommands so `cli` does something - other than error. - -* Support for shell autocompletion of subcommands, flags, and arguments - with callbacks in Go. You don't need to write any shell code. - -* Automatic help generation for listing subcommands - -* Automatic help flag recognition of `-h`, `--help`, etc. - -* Automatic version flag recognition of `-v`, `--version`. - -* Helpers for interacting with the terminal, such as outputting information, - asking for input, etc. These are optional, you can always interact with the - terminal however you choose. - -* Use of Go interfaces/types makes augmenting various parts of the library a - piece of cake. - -## Example - -Below is a simple example of creating and running a CLI - -```go -package main - -import ( - "log" - "os" - - "github.com/mitchellh/cli" -) - -func main() { - c := cli.NewCLI("app", "1.0.0") - c.Args = os.Args[1:] - c.Commands = map[string]cli.CommandFactory{ - "foo": fooCommandFactory, - "bar": barCommandFactory, - } - - exitStatus, err := c.Run() - if err != nil { - log.Println(err) - } - - os.Exit(exitStatus) -} -``` - diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go deleted file mode 100644 index 3bec6258..00000000 --- a/vendor/github.com/mitchellh/cli/autocomplete.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "github.com/posener/complete/cmd/install" -) - -// autocompleteInstaller is an interface to be implemented to perform the -// autocomplete installation and uninstallation with a CLI. -// -// This interface is not exported because it only exists for unit tests -// to be able to test that the installation is called properly. -type autocompleteInstaller interface { - Install(string) error - Uninstall(string) error -} - -// realAutocompleteInstaller uses the real install package to do the -// install/uninstall. -type realAutocompleteInstaller struct{} - -func (i *realAutocompleteInstaller) Install(cmd string) error { - return install.Install(cmd) -} - -func (i *realAutocompleteInstaller) Uninstall(cmd string) error { - return install.Uninstall(cmd) -} - -// mockAutocompleteInstaller is used for tests to record the install/uninstall. 
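The deleted README example wires `fooCommandFactory` into the CLI without showing it. A minimal sketch of such a factory, using a hypothetical `fooCommand` type (both names are illustrative, not part of the library):

```go
package main

import "github.com/mitchellh/cli"

// fooCommand is a hypothetical cli.Command implementation used only to
// illustrate the factory pattern from the README example.
type fooCommand struct{}

func (c *fooCommand) Help() string     { return "Usage: app foo [options]" }
func (c *fooCommand) Synopsis() string { return "Runs the foo operation" }

// Run does the actual work; the returned int becomes the process exit status.
func (c *fooCommand) Run(args []string) int {
	return 0
}

// fooCommandFactory satisfies cli.CommandFactory: func() (cli.Command, error).
func fooCommandFactory() (cli.Command, error) {
	return &fooCommand{}, nil
}
```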
-type mockAutocompleteInstaller struct { - InstallCalled bool - UninstallCalled bool -} - -func (i *mockAutocompleteInstaller) Install(cmd string) error { - i.InstallCalled = true - return nil -} - -func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { - i.UninstallCalled = true - return nil -} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go deleted file mode 100644 index c2dbe55a..00000000 --- a/vendor/github.com/mitchellh/cli/cli.go +++ /dev/null @@ -1,720 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "regexp" - "sort" - "strings" - "sync" - "text/template" - - "github.com/armon/go-radix" - "github.com/posener/complete" -) - -// CLI contains the state necessary to run subcommands and parse the -// command line arguments. -// -// CLI also supports nested subcommands, such as "cli foo bar". To use -// nested subcommands, the key in the Commands mapping below contains the -// full subcommand. In this example, it would be "foo bar". -// -// If you use a CLI with nested subcommands, some semantics change due to -// ambiguities: -// -// * We use longest prefix matching to find a matching subcommand. This -// means if you register "foo bar" and the user executes "cli foo qux", -// the "foo" command will be executed with the arg "qux". It is up to -// you to handle these args. One option is to just return the special -// help return code `RunResultHelp` to display help and exit. -// -// * The help flag "-h" or "-help" will look at all args to determine -// the help function. For example: "otto apps list -h" will show the -// help for "apps list" but "otto apps -h" will show it for "apps". -// In the normal CLI, only the first subcommand is used. -// -// * The help flag will list any subcommands that a command takes -// as well as the command's help itself. If there are no subcommands, -// it will note this. If the CLI itself has no subcommands, this entire -// section is omitted. -// -// * Any parent commands that don't exist are automatically created as -// no-op commands that just show help for other subcommands. For example, -// if you only register "foo bar", then "foo" is automatically created. -// -type CLI struct { - // Args is the list of command-line arguments received excluding - // the name of the app. For example, if the command "./cli foo bar" - // was invoked, then Args should be []string{"foo", "bar"}. - Args []string - - // Commands is a mapping of subcommand names to a factory function - // for creating that Command implementation. If there is a command - // with a blank string "", then it will be used as the default command - // if no subcommand is specified. - // - // If the key has a space in it, this will create a nested subcommand. - // For example, if the key is "foo bar", then to access it our CLI - // must be accessed with "./cli foo bar". See the docs for CLI for - // notes on how this changes some other behavior of the CLI as well. - // - // The factory should be as cheap as possible, ideally only allocating - // a struct. The factory may be called multiple times in the course - // of a command execution and certain events such as help require the - // instantiation of all commands. Expensive initialization should be - // deferred to function calls within the interface implementation. - Commands map[string]CommandFactory - - // HiddenCommands is a list of commands that are "hidden". Hidden - // commands are not given to the help function callback and do not - // show up in autocomplete. 
The values in the slice should be equivalent - // to the keys in the command map. - HiddenCommands []string - - // Name defines the name of the CLI. - Name string - - // Version of the CLI. - Version string - - // Autocomplete enables or disables subcommand auto-completion support. - // This is enabled by default when NewCLI is called. Otherwise, this - // must enabled explicitly. - // - // Autocomplete requires the "Name" option to be set on CLI. This name - // should be set exactly to the binary name that is autocompleted. - // - // Autocompletion is supported via the github.com/posener/complete - // library. This library supports bash, zsh and fish. To add support - // for other shells, please see that library. - // - // AutocompleteInstall and AutocompleteUninstall are the global flag - // names for installing and uninstalling the autocompletion handlers - // for the user's shell. The flag should omit the hyphen(s) in front of - // the value. Both single and double hyphens will automatically be supported - // for the flag name. These default to `autocomplete-install` and - // `autocomplete-uninstall` respectively. - // - // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- - // complete flags like -help and -version are added to the output. - // - // AutocompleteGlobalFlags are a mapping of global flags for - // autocompletion. The help and version flags are automatically added. - Autocomplete bool - AutocompleteInstall string - AutocompleteUninstall string - AutocompleteNoDefaultFlags bool - AutocompleteGlobalFlags complete.Flags - autocompleteInstaller autocompleteInstaller // For tests - - // HelpFunc and HelpWriter are used to output help information, if - // requested. - // - // HelpFunc is the function called to generate the generic help - // text that is shown if help must be shown for the CLI that doesn't - // pertain to a specific command. - // - // HelpWriter is the Writer where the help text is outputted to. If - // not specified, it will default to Stderr. - HelpFunc HelpFunc - HelpWriter io.Writer - - //--------------------------------------------------------------- - // Internal fields set automatically - - once sync.Once - autocomplete *complete.Complete - commandTree *radix.Tree - commandNested bool - commandHidden map[string]struct{} - subcommand string - subcommandArgs []string - topFlags []string - - // These are true when special global flags are set. We can/should - // probably use a bitset for this one day. - isHelp bool - isVersion bool - isAutocompleteInstall bool - isAutocompleteUninstall bool -} - -// NewClI returns a new CLI instance with sensible defaults. -func NewCLI(app, version string) *CLI { - return &CLI{ - Name: app, - Version: version, - HelpFunc: BasicHelpFunc(app), - Autocomplete: true, - } - -} - -// IsHelp returns whether or not the help flag is present within the -// arguments. -func (c *CLI) IsHelp() bool { - c.once.Do(c.init) - return c.isHelp -} - -// IsVersion returns whether or not the version flag is present within the -// arguments. -func (c *CLI) IsVersion() bool { - c.once.Do(c.init) - return c.isVersion -} - -// Run runs the actual CLI based on the arguments given. -func (c *CLI) Run() (int, error) { - c.once.Do(c.init) - - // If this is a autocompletion request, satisfy it. This must be called - // first before anything else since its possible to be autocompleting - // -help or -version or other flags and we want to show completions - // and not actually write the help or version. 
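The nested-subcommand behavior described in the CLI doc comment is driven entirely by command keys that contain spaces. A rough sketch using the package's own MockCommand (the "role" command names are made up for illustration):

```go
package main

import (
	"log"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	c := cli.NewCLI("app", "2.0.0")
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		// "role" is the parent; registering it explicitly keeps its help
		// text meaningful instead of relying on the auto-generated no-op.
		"role": func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: "Manage roles", RunResult: cli.RunResultHelp}, nil
		},
		// A key with a space registers a nested subcommand: `app role create`.
		"role create": func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: "Create a role", HelpText: "Usage: app role create <name>"}, nil
		},
	}

	status, err := c.Run()
	if err != nil {
		log.Println(err)
	}
	os.Exit(status)
}
```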
- if c.Autocomplete && c.autocomplete.Complete() { - return 0, nil - } - - // Just show the version and exit if instructed. - if c.IsVersion() && c.Version != "" { - c.HelpWriter.Write([]byte(c.Version + "\n")) - return 0, nil - } - - // Just print the help when only '-h' or '--help' is passed. - if c.IsHelp() && c.Subcommand() == "" { - c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) - return 0, nil - } - - // If we're attempting to install or uninstall autocomplete then handle - if c.Autocomplete { - // Autocomplete requires the "Name" to be set so that we know what - // command to setup the autocomplete on. - if c.Name == "" { - return 1, fmt.Errorf( - "internal error: CLI.Name must be specified for autocomplete to work") - } - - // If both install and uninstall flags are specified, then error - if c.isAutocompleteInstall && c.isAutocompleteUninstall { - return 1, fmt.Errorf( - "Either the autocomplete install or uninstall flag may " + - "be specified, but not both.") - } - - // If the install flag is specified, perform the install or uninstall - if c.isAutocompleteInstall { - if err := c.autocompleteInstaller.Install(c.Name); err != nil { - return 1, err - } - - return 0, nil - } - - if c.isAutocompleteUninstall { - if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { - return 1, err - } - - return 0, nil - } - } - - // Attempt to get the factory function for creating the command - // implementation. If the command is invalid or blank, it is an error. - raw, ok := c.commandTree.Get(c.Subcommand()) - if !ok { - c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) - return 127, nil - } - - command, err := raw.(CommandFactory)() - if err != nil { - return 1, err - } - - // If we've been instructed to just print the help, then print it - if c.IsHelp() { - c.commandHelp(command) - return 0, nil - } - - // If there is an invalid flag, then error - if len(c.topFlags) > 0 { - c.HelpWriter.Write([]byte( - "Invalid flags before the subcommand. If these flags are for\n" + - "the subcommand, please put them after the subcommand.\n\n")) - c.commandHelp(command) - return 1, nil - } - - code := command.Run(c.SubcommandArgs()) - if code == RunResultHelp { - // Requesting help - c.commandHelp(command) - return 1, nil - } - - return code, nil -} - -// Subcommand returns the subcommand that the CLI would execute. For -// example, a CLI from "--version version --help" would return a Subcommand -// of "version" -func (c *CLI) Subcommand() string { - c.once.Do(c.init) - return c.subcommand -} - -// SubcommandArgs returns the arguments that will be passed to the -// subcommand. -func (c *CLI) SubcommandArgs() []string { - c.once.Do(c.init) - return c.subcommandArgs -} - -// subcommandParent returns the parent of this subcommand, if there is one. -// If there isn't on, "" is returned. 
-func (c *CLI) subcommandParent() string { - // Get the subcommand, if it is "" alread just return - sub := c.Subcommand() - if sub == "" { - return sub - } - - // Clear any trailing spaces and find the last space - sub = strings.TrimRight(sub, " ") - idx := strings.LastIndex(sub, " ") - - if idx == -1 { - // No space means our parent is root - return "" - } - - return sub[:idx] -} - -func (c *CLI) init() { - if c.HelpFunc == nil { - c.HelpFunc = BasicHelpFunc("app") - - if c.Name != "" { - c.HelpFunc = BasicHelpFunc(c.Name) - } - } - - if c.HelpWriter == nil { - c.HelpWriter = os.Stderr - } - - // Build our hidden commands - if len(c.HiddenCommands) > 0 { - c.commandHidden = make(map[string]struct{}) - for _, h := range c.HiddenCommands { - c.commandHidden[h] = struct{}{} - } - } - - // Build our command tree - c.commandTree = radix.New() - c.commandNested = false - for k, v := range c.Commands { - k = strings.TrimSpace(k) - c.commandTree.Insert(k, v) - if strings.ContainsRune(k, ' ') { - c.commandNested = true - } - } - - // Go through the key and fill in any missing parent commands - if c.commandNested { - var walkFn radix.WalkFn - toInsert := make(map[string]struct{}) - walkFn = func(k string, raw interface{}) bool { - idx := strings.LastIndex(k, " ") - if idx == -1 { - // If there is no space, just ignore top level commands - return false - } - - // Trim up to that space so we can get the expected parent - k = k[:idx] - if _, ok := c.commandTree.Get(k); ok { - // Yay we have the parent! - return false - } - - // We're missing the parent, so let's insert this - toInsert[k] = struct{}{} - - // Call the walk function recursively so we check this one too - return walkFn(k, nil) - } - - // Walk! - c.commandTree.Walk(walkFn) - - // Insert any that we're missing - for k := range toInsert { - var f CommandFactory = func() (Command, error) { - return &MockCommand{ - HelpText: "This command is accessed by using one of the subcommands below.", - RunResult: RunResultHelp, - }, nil - } - - c.commandTree.Insert(k, f) - } - } - - // Setup autocomplete if we have it enabled. We have to do this after - // the command tree is setup so we can use the radix tree to easily find - // all subcommands. - if c.Autocomplete { - c.initAutocomplete() - } - - // Process the args - c.processArgs() -} - -func (c *CLI) initAutocomplete() { - if c.AutocompleteInstall == "" { - c.AutocompleteInstall = defaultAutocompleteInstall - } - - if c.AutocompleteUninstall == "" { - c.AutocompleteUninstall = defaultAutocompleteUninstall - } - - if c.autocompleteInstaller == nil { - c.autocompleteInstaller = &realAutocompleteInstaller{} - } - - // Build the root command - cmd := c.initAutocompleteSub("") - - // For the root, we add the global flags to the "Flags". This way - // they don't show up on every command. - if !c.AutocompleteNoDefaultFlags { - cmd.Flags = map[string]complete.Predictor{ - "-" + c.AutocompleteInstall: complete.PredictNothing, - "-" + c.AutocompleteUninstall: complete.PredictNothing, - "-help": complete.PredictNothing, - "-version": complete.PredictNothing, - } - } - cmd.GlobalFlags = c.AutocompleteGlobalFlags - - c.autocomplete = complete.New(c.Name, cmd) -} - -// initAutocompleteSub creates the complete.Command for a subcommand with -// the given prefix. This will continue recursively for all subcommands. -// The prefix "" (empty string) can be used for the root command. 
-func (c *CLI) initAutocompleteSub(prefix string) complete.Command { - var cmd complete.Command - walkFn := func(k string, raw interface{}) bool { - // Ignore the empty key which can be present for default commands. - if k == "" { - return false - } - - // Keep track of the full key so that we can nest further if necessary - fullKey := k - - if len(prefix) > 0 { - // If we have a prefix, trim the prefix + 1 (for the space) - // Example: turns "sub one" to "one" with prefix "sub" - k = k[len(prefix)+1:] - } - - if idx := strings.Index(k, " "); idx >= 0 { - // If there is a space, we trim up to the space. This turns - // "sub sub2 sub3" into "sub". The prefix trim above will - // trim our current depth properly. - k = k[:idx] - } - - if _, ok := cmd.Sub[k]; ok { - // If we already tracked this subcommand then ignore - return false - } - - // If the command is hidden, don't record it at all - if _, ok := c.commandHidden[fullKey]; ok { - return false - } - - if cmd.Sub == nil { - cmd.Sub = complete.Commands(make(map[string]complete.Command)) - } - subCmd := c.initAutocompleteSub(fullKey) - - // Instantiate the command so that we can check if the command is - // a CommandAutocomplete implementation. If there is an error - // creating the command, we just ignore it since that will be caught - // later. - impl, err := raw.(CommandFactory)() - if err != nil { - impl = nil - } - - // Check if it implements ComandAutocomplete. If so, setup the autocomplete - if c, ok := impl.(CommandAutocomplete); ok { - subCmd.Args = c.AutocompleteArgs() - subCmd.Flags = c.AutocompleteFlags() - } - - cmd.Sub[k] = subCmd - return false - } - - walkPrefix := prefix - if walkPrefix != "" { - walkPrefix += " " - } - - c.commandTree.WalkPrefix(walkPrefix, walkFn) - return cmd -} - -func (c *CLI) commandHelp(command Command) { - // Get the template to use - tpl := strings.TrimSpace(defaultHelpTemplate) - if t, ok := command.(CommandHelpTemplate); ok { - tpl = t.HelpTemplate() - } - if !strings.HasSuffix(tpl, "\n") { - tpl += "\n" - } - - // Parse it - t, err := template.New("root").Parse(tpl) - if err != nil { - t = template.Must(template.New("root").Parse(fmt.Sprintf( - "Internal error! 
Failed to parse command help template: %s\n", err))) - } - - // Template data - data := map[string]interface{}{ - "Name": c.Name, - "Help": command.Help(), - } - - // Build subcommand list if we have it - var subcommandsTpl []map[string]interface{} - if c.commandNested { - // Get the matching keys - subcommands := c.helpCommands(c.Subcommand()) - keys := make([]string, 0, len(subcommands)) - for k := range subcommands { - keys = append(keys, k) - } - - // Sort the keys - sort.Strings(keys) - - // Figure out the padding length - var longest int - for _, k := range keys { - if v := len(k); v > longest { - longest = v - } - } - - // Go through and create their structures - subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) - for _, k := range keys { - // Get the command - raw, ok := subcommands[k] - if !ok { - c.HelpWriter.Write([]byte(fmt.Sprintf( - "Error getting subcommand %q", k))) - } - sub, err := raw() - if err != nil { - c.HelpWriter.Write([]byte(fmt.Sprintf( - "Error instantiating %q: %s", k, err))) - } - - // Find the last space and make sure we only include that last part - name := k - if idx := strings.LastIndex(k, " "); idx > -1 { - name = name[idx+1:] - } - - subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ - "Name": name, - "NameAligned": name + strings.Repeat(" ", longest-len(k)), - "Help": sub.Help(), - "Synopsis": sub.Synopsis(), - }) - } - } - data["Subcommands"] = subcommandsTpl - - // Write - err = t.Execute(c.HelpWriter, data) - if err == nil { - return - } - - // An error, just output... - c.HelpWriter.Write([]byte(fmt.Sprintf( - "Internal error rendering help: %s", err))) -} - -// helpCommands returns the subcommands for the HelpFunc argument. -// This will only contain immediate subcommands. -func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { - // If our prefix isn't empty, make sure it ends in ' ' - if prefix != "" && prefix[len(prefix)-1] != ' ' { - prefix += " " - } - - // Get all the subkeys of this command - var keys []string - c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { - // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" - if !strings.Contains(k[len(prefix):], " ") { - keys = append(keys, k) - } - - return false - }) - - // For each of the keys return that in the map - result := make(map[string]CommandFactory, len(keys)) - for _, k := range keys { - raw, ok := c.commandTree.Get(k) - if !ok { - // We just got it via WalkPrefix above, so we just panic - panic("not found: " + k) - } - - // If this is a hidden command, don't show it - if _, ok := c.commandHidden[k]; ok { - continue - } - - result[k] = raw.(CommandFactory) - } - - return result -} - -func (c *CLI) processArgs() { - for i, arg := range c.Args { - if arg == "--" { - break - } - - // Check for help flags. - if arg == "-h" || arg == "-help" || arg == "--help" { - c.isHelp = true - continue - } - - // Check for autocomplete flags - if c.Autocomplete { - if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { - c.isAutocompleteInstall = true - continue - } - - if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { - c.isAutocompleteUninstall = true - continue - } - } - - if c.subcommand == "" { - // Check for version flags if not in a subcommand. - if arg == "-v" || arg == "-version" || arg == "--version" { - c.isVersion = true - continue - } - - if arg != "" && arg[0] == '-' { - // Record the arg... 
- c.topFlags = append(c.topFlags, arg) - } - } - - // If we didn't find a subcommand yet and this is the first non-flag - // argument, then this is our subcommand. - if c.subcommand == "" && arg != "" && arg[0] != '-' { - c.subcommand = arg - if c.commandNested { - // If the command has a space in it, then it is invalid. - // Set a blank command so that it fails. - if strings.ContainsRune(arg, ' ') { - c.subcommand = "" - return - } - - // Determine the argument we look to to end subcommands. - // We look at all arguments until one has a space. This - // disallows commands like: ./cli foo "bar baz". An argument - // with a space is always an argument. - j := 0 - for k, v := range c.Args[i:] { - if strings.ContainsRune(v, ' ') { - break - } - - j = i + k + 1 - } - - // Nested CLI, the subcommand is actually the entire - // arg list up to a flag that is still a valid subcommand. - searchKey := strings.Join(c.Args[i:j], " ") - k, _, ok := c.commandTree.LongestPrefix(searchKey) - if ok { - // k could be a prefix that doesn't contain the full - // command such as "foo" instead of "foobar", so we - // need to verify that we have an entire key. To do that, - // we look for an ending in a space or an end of string. - reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) - if reVerify.MatchString(searchKey) { - c.subcommand = k - i += strings.Count(k, " ") - } - } - } - - // The remaining args the subcommand arguments - c.subcommandArgs = c.Args[i+1:] - } - } - - // If we never found a subcommand and support a default command, then - // switch to using that. - if c.subcommand == "" { - if _, ok := c.Commands[""]; ok { - args := c.topFlags - args = append(args, c.subcommandArgs...) - c.topFlags = nil - c.subcommandArgs = args - } - } -} - -// defaultAutocompleteInstall and defaultAutocompleteUninstall are the -// default values for the autocomplete install and uninstall flags. -const defaultAutocompleteInstall = "autocomplete-install" -const defaultAutocompleteUninstall = "autocomplete-uninstall" - -const defaultHelpTemplate = ` -{{.Help}}{{if gt (len .Subcommands) 0}} - -Subcommands: -{{- range $value := .Subcommands }} - {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} -{{- end }} -` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go deleted file mode 100644 index bed11faf..00000000 --- a/vendor/github.com/mitchellh/cli/command.go +++ /dev/null @@ -1,67 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -const ( - // RunResultHelp is a value that can be returned from Run to signal - // to the CLI to render the help output. - RunResultHelp = -18511 -) - -// A command is a runnable sub-command of a CLI. -type Command interface { - // Help should return long-form help text that includes the command-line - // usage, a brief few sentences explaining the function of the command, - // and the complete list of flags the command accepts. - Help() string - - // Run should run the actual command with the given CLI instance and - // command-line arguments. It should return the exit status when it is - // finished. - // - // There are a handful of special exit codes this can return documented - // above that change behavior. - Run(args []string) int - - // Synopsis should return a one-line, short synopsis of the command. - // This should be less than 50 characters ideally. - Synopsis() string -} - -// CommandAutocomplete is an extension of Command that enables fine-grained -// autocompletion. 
Subcommand autocompletion will work even if this interface -// is not implemented. By implementing this interface, more advanced -// autocompletion is enabled. -type CommandAutocomplete interface { - // AutocompleteArgs returns the argument predictor for this command. - // If argument completion is not supported, this should return - // complete.PredictNothing. - AutocompleteArgs() complete.Predictor - - // AutocompleteFlags returns a mapping of supported flags and autocomplete - // options for this command. The map key for the Flags map should be the - // complete flag such as "-foo" or "--foo". - AutocompleteFlags() complete.Flags -} - -// CommandHelpTemplate is an extension of Command that also has a function -// for returning a template for the help rather than the help itself. In -// this scenario, both Help and HelpTemplate should be implemented. -// -// If CommandHelpTemplate isn't implemented, the Help is output as-is. -type CommandHelpTemplate interface { - // HelpTemplate is the template in text/template format to use for - // displaying the Help. The keys available are: - // - // * ".Help" - The help text itself - // * ".Subcommands" - // - HelpTemplate() string -} - -// CommandFactory is a type of function that is a factory for commands. -// We need a factory because we may need to setup some state on the -// struct that implements the command itself. -type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go deleted file mode 100644 index 7a584b7e..00000000 --- a/vendor/github.com/mitchellh/cli/command_mock.go +++ /dev/null @@ -1,63 +0,0 @@ -package cli - -import ( - "github.com/posener/complete" -) - -// MockCommand is an implementation of Command that can be used for tests. -// It is publicly exported from this package in case you want to use it -// externally. -type MockCommand struct { - // Settable - HelpText string - RunResult int - SynopsisText string - - // Set by the command - RunCalled bool - RunArgs []string -} - -func (c *MockCommand) Help() string { - return c.HelpText -} - -func (c *MockCommand) Run(args []string) int { - c.RunCalled = true - c.RunArgs = args - - return c.RunResult -} - -func (c *MockCommand) Synopsis() string { - return c.SynopsisText -} - -// MockCommandAutocomplete is an implementation of CommandAutocomplete. -type MockCommandAutocomplete struct { - MockCommand - - // Settable - AutocompleteArgsValue complete.Predictor - AutocompleteFlagsValue complete.Flags -} - -func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { - return c.AutocompleteArgsValue -} - -func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { - return c.AutocompleteFlagsValue -} - -// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. 
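A command opts in to this fine-grained completion by implementing the two CommandAutocomplete methods. A sketch continuing the hypothetical `fooCommand` from the earlier factory example (flag names are illustrative):

```go
package main

import "github.com/posener/complete"

// These methods promote the hypothetical fooCommand from a plain cli.Command
// to a cli.CommandAutocomplete.
func (c *fooCommand) AutocompleteArgs() complete.Predictor {
	// No positional-argument completion for this command.
	return complete.PredictNothing
}

func (c *fooCommand) AutocompleteFlags() complete.Flags {
	return complete.Flags{
		"-verbose": complete.PredictNothing,
		"-name":    complete.PredictAnything,
	}
}
```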
-type MockCommandHelpTemplate struct { - MockCommand - - // Settable - HelpTemplateText string -} - -func (c *MockCommandHelpTemplate) HelpTemplate() string { - return c.HelpTemplateText -} diff --git a/vendor/github.com/mitchellh/cli/go.mod b/vendor/github.com/mitchellh/cli/go.mod deleted file mode 100644 index 675325ff..00000000 --- a/vendor/github.com/mitchellh/cli/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/mitchellh/cli - -require ( - github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 - github.com/bgentry/speakeasy v0.1.0 - github.com/fatih/color v1.7.0 - github.com/hashicorp/go-multierror v1.0.0 // indirect - github.com/mattn/go-colorable v0.0.9 // indirect - github.com/mattn/go-isatty v0.0.3 - github.com/posener/complete v1.1.1 - golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc // indirect -) diff --git a/vendor/github.com/mitchellh/cli/go.sum b/vendor/github.com/mitchellh/cli/go.sum deleted file mode 100644 index 03708752..00000000 --- a/vendor/github.com/mitchellh/cli/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= -github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU= -github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go deleted file mode 100644 index f5ca58f5..00000000 --- a/vendor/github.com/mitchellh/cli/help.go +++ /dev/null @@ -1,79 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "log" - "sort" - "strings" -) - -// HelpFunc is the type of the function that is 
responsible for generating -// the help output when the CLI must show the general help text. -type HelpFunc func(map[string]CommandFactory) string - -// BasicHelpFunc generates some basic help output that is usually good enough -// for most CLI applications. -func BasicHelpFunc(app string) HelpFunc { - return func(commands map[string]CommandFactory) string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf( - "Usage: %s [--version] [--help] []\n\n", - app)) - buf.WriteString("Available commands are:\n") - - // Get the list of keys so we can sort them, and also get the maximum - // key length so they can be aligned properly. - keys := make([]string, 0, len(commands)) - maxKeyLen := 0 - for key := range commands { - if len(key) > maxKeyLen { - maxKeyLen = len(key) - } - - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - commandFunc, ok := commands[key] - if !ok { - // This should never happen since we JUST built the list of - // keys. - panic("command not found: " + key) - } - - command, err := commandFunc() - if err != nil { - log.Printf("[ERR] cli: Command '%s' failed to load: %s", - key, err) - continue - } - - key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) - buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) - } - - return buf.String() - } -} - -// FilteredHelpFunc will filter the commands to only include the keys -// in the include parameter. -func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { - return func(commands map[string]CommandFactory) string { - set := make(map[string]struct{}) - for _, k := range include { - set[k] = struct{}{} - } - - filtered := make(map[string]CommandFactory) - for k, f := range commands { - if _, ok := set[k]; ok { - filtered[k] = f - } - } - - return f(filtered) - } -} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go deleted file mode 100644 index a2d6f94f..00000000 --- a/vendor/github.com/mitchellh/cli/ui.go +++ /dev/null @@ -1,187 +0,0 @@ -package cli - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/signal" - "strings" - - "github.com/bgentry/speakeasy" - "github.com/mattn/go-isatty" -) - -// Ui is an interface for interacting with the terminal, or "interface" -// of a CLI. This abstraction doesn't have to be used, but helps provide -// a simple, layerable way to manage user interactions. -type Ui interface { - // Ask asks the user for input using the given query. The response is - // returned as the given string, or an error. - Ask(string) (string, error) - - // AskSecret asks the user for input using the given query, but does not echo - // the keystrokes to the terminal. - AskSecret(string) (string, error) - - // Output is called for normal standard output. - Output(string) - - // Info is called for information related to the previous output. - // In general this may be the exact same as Output, but this gives - // Ui implementors some flexibility with output formats. - Info(string) - - // Error is used for any error messages that might appear on standard - // error. - Error(string) - - // Warn is used for any warning messages that might appear on standard - // error. - Warn(string) -} - -// BasicUi is an implementation of Ui that just outputs to the given -// writer. This UI is not threadsafe by default, but you can wrap it -// in a ConcurrentUi to make it safe. 
-type BasicUi struct { - Reader io.Reader - Writer io.Writer - ErrorWriter io.Writer -} - -func (u *BasicUi) Ask(query string) (string, error) { - return u.ask(query, false) -} - -func (u *BasicUi) AskSecret(query string) (string, error) { - return u.ask(query, true) -} - -func (u *BasicUi) ask(query string, secret bool) (string, error) { - if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { - return "", err - } - - // Register for interrupts so that we can catch it and immediately - // return... - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - defer signal.Stop(sigCh) - - // Ask for input in a go-routine so that we can ignore it. - errCh := make(chan error, 1) - lineCh := make(chan string, 1) - go func() { - var line string - var err error - if secret && isatty.IsTerminal(os.Stdin.Fd()) { - line, err = speakeasy.Ask("") - } else { - r := bufio.NewReader(u.Reader) - line, err = r.ReadString('\n') - } - if err != nil { - errCh <- err - return - } - - lineCh <- strings.TrimRight(line, "\r\n") - }() - - select { - case err := <-errCh: - return "", err - case line := <-lineCh: - return line, nil - case <-sigCh: - // Print a newline so that any further output starts properly - // on a new line. - fmt.Fprintln(u.Writer) - - return "", errors.New("interrupted") - } -} - -func (u *BasicUi) Error(message string) { - w := u.Writer - if u.ErrorWriter != nil { - w = u.ErrorWriter - } - - fmt.Fprint(w, message) - fmt.Fprint(w, "\n") -} - -func (u *BasicUi) Info(message string) { - u.Output(message) -} - -func (u *BasicUi) Output(message string) { - fmt.Fprint(u.Writer, message) - fmt.Fprint(u.Writer, "\n") -} - -func (u *BasicUi) Warn(message string) { - u.Error(message) -} - -// PrefixedUi is an implementation of Ui that prefixes messages. -type PrefixedUi struct { - AskPrefix string - AskSecretPrefix string - OutputPrefix string - InfoPrefix string - ErrorPrefix string - WarnPrefix string - Ui Ui -} - -func (u *PrefixedUi) Ask(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskPrefix, query) - } - - return u.Ui.Ask(query) -} - -func (u *PrefixedUi) AskSecret(query string) (string, error) { - if query != "" { - query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) - } - - return u.Ui.AskSecret(query) -} - -func (u *PrefixedUi) Error(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) - } - - u.Ui.Error(message) -} - -func (u *PrefixedUi) Info(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.InfoPrefix, message) - } - - u.Ui.Info(message) -} - -func (u *PrefixedUi) Output(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.OutputPrefix, message) - } - - u.Ui.Output(message) -} - -func (u *PrefixedUi) Warn(message string) { - if message != "" { - message = fmt.Sprintf("%s%s", u.WarnPrefix, message) - } - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go deleted file mode 100644 index b0ec4484..00000000 --- a/vendor/github.com/mitchellh/cli/ui_colored.go +++ /dev/null @@ -1,73 +0,0 @@ -package cli - -import ( - "github.com/fatih/color" -) - -const ( - noColor = -1 -) - -// UiColor is a posix shell color code to use. -type UiColor struct { - Code int - Bold bool -} - -// A list of colors that are useful. These are all non-bolded by default. 
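BasicUi writes straight to the supplied reader/writers, and PrefixedUi wraps another Ui to tag each class of message. A small usage sketch (prefix strings and messages are illustrative):

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	var ui cli.Ui = &cli.PrefixedUi{
		OutputPrefix: "    ",
		InfoPrefix:   "==> ",
		WarnPrefix:   "warning: ",
		ErrorPrefix:  "error: ",
		Ui: &cli.BasicUi{
			Reader:      os.Stdin,
			Writer:      os.Stdout,
			ErrorWriter: os.Stderr,
		},
	}

	ui.Info("creating IAM role")
	ui.Warn("default policies were not attached")
}
```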
-var ( - UiColorNone UiColor = UiColor{noColor, false} - UiColorRed = UiColor{int(color.FgHiRed), false} - UiColorGreen = UiColor{int(color.FgHiGreen), false} - UiColorYellow = UiColor{int(color.FgHiYellow), false} - UiColorBlue = UiColor{int(color.FgHiBlue), false} - UiColorMagenta = UiColor{int(color.FgHiMagenta), false} - UiColorCyan = UiColor{int(color.FgHiCyan), false} -) - -// ColoredUi is a Ui implementation that colors its output according -// to the given color schemes for the given type of output. -type ColoredUi struct { - OutputColor UiColor - InfoColor UiColor - ErrorColor UiColor - WarnColor UiColor - Ui Ui -} - -func (u *ColoredUi) Ask(query string) (string, error) { - return u.Ui.Ask(u.colorize(query, u.OutputColor)) -} - -func (u *ColoredUi) AskSecret(query string) (string, error) { - return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) -} - -func (u *ColoredUi) Output(message string) { - u.Ui.Output(u.colorize(message, u.OutputColor)) -} - -func (u *ColoredUi) Info(message string) { - u.Ui.Info(u.colorize(message, u.InfoColor)) -} - -func (u *ColoredUi) Error(message string) { - u.Ui.Error(u.colorize(message, u.ErrorColor)) -} - -func (u *ColoredUi) Warn(message string) { - u.Ui.Warn(u.colorize(message, u.WarnColor)) -} - -func (u *ColoredUi) colorize(message string, uc UiColor) string { - if uc.Code == noColor { - return message - } - - attr := []color.Attribute{color.Attribute(uc.Code)} - if uc.Bold { - attr = append(attr, color.Bold) - } - - return color.New(attr...).SprintFunc()(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go deleted file mode 100644 index b4f4dbfa..00000000 --- a/vendor/github.com/mitchellh/cli/ui_concurrent.go +++ /dev/null @@ -1,54 +0,0 @@ -package cli - -import ( - "sync" -) - -// ConcurrentUi is a wrapper around a Ui interface (and implements that -// interface) making the underlying Ui concurrency safe. -type ConcurrentUi struct { - Ui Ui - l sync.Mutex -} - -func (u *ConcurrentUi) Ask(query string) (string, error) { - u.l.Lock() - defer u.l.Unlock() - - return u.Ui.Ask(query) -} - -func (u *ConcurrentUi) AskSecret(query string) (string, error) { - u.l.Lock() - defer u.l.Unlock() - - return u.Ui.AskSecret(query) -} - -func (u *ConcurrentUi) Error(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Error(message) -} - -func (u *ConcurrentUi) Info(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Info(message) -} - -func (u *ConcurrentUi) Output(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Output(message) -} - -func (u *ConcurrentUi) Warn(message string) { - u.l.Lock() - defer u.l.Unlock() - - u.Ui.Warn(message) -} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go deleted file mode 100644 index 0bfe0a19..00000000 --- a/vendor/github.com/mitchellh/cli/ui_mock.go +++ /dev/null @@ -1,111 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "io" - "sync" -) - -// NewMockUi returns a fully initialized MockUi instance -// which is safe for concurrent use. -func NewMockUi() *MockUi { - m := new(MockUi) - m.once.Do(m.init) - return m -} - -// MockUi is a mock UI that is used for tests and is exported publicly -// for use in external tests if needed as well. Do not instantite this -// directly since the buffers will be initialized on the first write. If -// there is no write then you will get a nil panic. Please use the -// NewMockUi() constructor function instead. 
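In tests, MockUi stands in for a real terminal and captures output in buffers. A rough example of asserting on that output (the test name and message are hypothetical):

```go
package main

import (
	"strings"
	"testing"

	"github.com/mitchellh/cli"
)

func TestOutputCapture(t *testing.T) {
	// NewMockUi pre-initializes the buffers, so they are safe to read even
	// before anything has been written.
	ui := cli.NewMockUi()
	ui.Output("created role TEST-DELETE-PRIMARY-PROVIDER")

	if !strings.Contains(ui.OutputWriter.String(), "created role") {
		t.Fatalf("unexpected output: %q", ui.OutputWriter.String())
	}
}
```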
You can fix your code with -// -// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go -type MockUi struct { - InputReader io.Reader - ErrorWriter *syncBuffer - OutputWriter *syncBuffer - - once sync.Once -} - -func (u *MockUi) Ask(query string) (string, error) { - u.once.Do(u.init) - - var result string - fmt.Fprint(u.OutputWriter, query) - if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { - return "", err - } - - return result, nil -} - -func (u *MockUi) AskSecret(query string) (string, error) { - return u.Ask(query) -} - -func (u *MockUi) Error(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) Info(message string) { - u.Output(message) -} - -func (u *MockUi) Output(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.OutputWriter, message) - fmt.Fprint(u.OutputWriter, "\n") -} - -func (u *MockUi) Warn(message string) { - u.once.Do(u.init) - - fmt.Fprint(u.ErrorWriter, message) - fmt.Fprint(u.ErrorWriter, "\n") -} - -func (u *MockUi) init() { - u.ErrorWriter = new(syncBuffer) - u.OutputWriter = new(syncBuffer) -} - -type syncBuffer struct { - sync.RWMutex - b bytes.Buffer -} - -func (b *syncBuffer) Write(data []byte) (int, error) { - b.Lock() - defer b.Unlock() - return b.b.Write(data) -} - -func (b *syncBuffer) Read(data []byte) (int, error) { - b.RLock() - defer b.RUnlock() - return b.b.Read(data) -} - -func (b *syncBuffer) Reset() { - b.Lock() - b.b.Reset() - b.Unlock() -} - -func (b *syncBuffer) String() string { - return string(b.Bytes()) -} - -func (b *syncBuffer) Bytes() []byte { - b.RLock() - data := b.b.Bytes() - b.RUnlock() - return data -} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go deleted file mode 100644 index 1e1db3cf..00000000 --- a/vendor/github.com/mitchellh/cli/ui_writer.go +++ /dev/null @@ -1,18 +0,0 @@ -package cli - -// UiWriter is an io.Writer implementation that can be used with -// loggers that writes every line of log output data to a Ui at the -// Info level. -type UiWriter struct { - Ui Ui -} - -func (w *UiWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > 0 && p[n-1] == '\n' { - p = p[:n-1] - } - - w.Ui.Info(string(p)) - return n, nil -} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml deleted file mode 100644 index 74e286ae..00000000 --- a/vendor/github.com/mitchellh/colorstring/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/colorstring/LICENSE b/vendor/github.com/mitchellh/colorstring/LICENSE deleted file mode 100644 index 22985159..00000000 --- a/vendor/github.com/mitchellh/colorstring/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/colorstring/README.md b/vendor/github.com/mitchellh/colorstring/README.md deleted file mode 100644 index 0654d454..00000000 --- a/vendor/github.com/mitchellh/colorstring/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# colorstring [![Build Status](https://travis-ci.org/mitchellh/colorstring.svg)](https://travis-ci.org/mitchellh/colorstring) - -colorstring is a [Go](http://www.golang.org) library for outputting colored -strings to a console using a simple inline syntax in your string to specify -the color to print as. - -For example, the string `[blue]hello [red]world` would output the text -"hello world" in two colors. The API of colorstring allows for easily disabling -colors, adding aliases, etc. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/colorstring -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/colorstring). - -Usage is easy enough: - -```go -colorstring.Println("[blue]Hello [red]World!") -``` - -Additionally, the `Colorize` struct can be used to set options such as -custom colors, color disabling, etc. diff --git a/vendor/github.com/mitchellh/colorstring/colorstring.go b/vendor/github.com/mitchellh/colorstring/colorstring.go deleted file mode 100644 index 3de5b241..00000000 --- a/vendor/github.com/mitchellh/colorstring/colorstring.go +++ /dev/null @@ -1,244 +0,0 @@ -// colorstring provides functions for colorizing strings for terminal -// output. -package colorstring - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -// Color colorizes your strings using the default settings. -// -// Strings given to Color should use the syntax `[color]` to specify the -// color for text following. For example: `[blue]Hello` will return "Hello" -// in blue. See DefaultColors for all the supported colors and attributes. -// -// If an unrecognized color is given, it is ignored and assumed to be part -// of the string. For example: `[hi]world` will result in "[hi]world". -// -// A color reset is appended to the end of every string. This will reset -// the color of following strings when you output this text to the same -// terminal session. -// -// If you want to customize any of this behavior, use the Colorize struct. -func Color(v string) string { - return def.Color(v) -} - -// ColorPrefix returns the color sequence that prefixes the given text. -// -// This is useful when wrapping text if you want to inherit the color -// of the wrapped text. For example, "[green]foo" will return "[green]". -// If there is no color sequence, then this will return "". -func ColorPrefix(v string) string { - return def.ColorPrefix(v) -} - -// Colorize colorizes your strings, giving you the ability to customize -// some of the colorization process. -// -// The options in Colorize can be set to customize colorization. If you're -// only interested in the defaults, just use the top Color function directly, -// which creates a default Colorize. 
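Beyond the package-level helpers shown in the deleted README, the Colorize struct lets callers disable colors or supply their own palette. A small sketch (the message text is illustrative):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/colorstring"
)

func main() {
	c := colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: false, // set true to drop recognized [color] tags entirely
		Reset:   true,  // append a reset code after each colored string
	}

	fmt.Println(c.Color("[bold][green]apply complete: [reset]3 resources added"))
}
```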
-type Colorize struct { - // Colors maps a color string to the code for that color. The code - // is a string so that you can use more complex colors to set foreground, - // background, attributes, etc. For example, "boldblue" might be - // "1;34" - Colors map[string]string - - // If true, color attributes will be ignored. This is useful if you're - // outputting to a location that doesn't support colors and you just - // want the strings returned. - Disable bool - - // Reset, if true, will reset the color after each colorization by - // adding a reset code at the end. - Reset bool -} - -// Color colorizes a string according to the settings setup in the struct. -// -// For more details on the syntax, see the top-level Color function. -func (c *Colorize) Color(v string) string { - matches := parseRe.FindAllStringIndex(v, -1) - if len(matches) == 0 { - return v - } - - result := new(bytes.Buffer) - colored := false - m := []int{0, 0} - for _, nm := range matches { - // Write the text in between this match and the last - result.WriteString(v[m[1]:nm[0]]) - m = nm - - var replace string - if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { - colored = true - - if !c.Disable { - replace = fmt.Sprintf("\033[%sm", code) - } - } else { - replace = v[m[0]:m[1]] - } - - result.WriteString(replace) - } - result.WriteString(v[m[1]:]) - - if colored && c.Reset && !c.Disable { - // Write the clear byte at the end - result.WriteString("\033[0m") - } - - return result.String() -} - -// ColorPrefix returns the first color sequence that exists in this string. -// -// For example: "[green]foo" would return "[green]". If no color sequence -// exists, then "" is returned. This is especially useful when wrapping -// colored texts to inherit the color of the wrapped text. -func (c *Colorize) ColorPrefix(v string) string { - return prefixRe.FindString(strings.TrimSpace(v)) -} - -// DefaultColors are the default colors used when colorizing. -// -// If the color is surrounded in underscores, such as "_blue_", then that -// color will be used for the background color. -var DefaultColors map[string]string - -func init() { - DefaultColors = map[string]string{ - // Default foreground/background colors - "default": "39", - "_default_": "49", - - // Foreground colors - "black": "30", - "red": "31", - "green": "32", - "yellow": "33", - "blue": "34", - "magenta": "35", - "cyan": "36", - "light_gray": "37", - "dark_gray": "90", - "light_red": "91", - "light_green": "92", - "light_yellow": "93", - "light_blue": "94", - "light_magenta": "95", - "light_cyan": "96", - "white": "97", - - // Background colors - "_black_": "40", - "_red_": "41", - "_green_": "42", - "_yellow_": "43", - "_blue_": "44", - "_magenta_": "45", - "_cyan_": "46", - "_light_gray_": "47", - "_dark_gray_": "100", - "_light_red_": "101", - "_light_green_": "102", - "_light_yellow_": "103", - "_light_blue_": "104", - "_light_magenta_": "105", - "_light_cyan_": "106", - "_white_": "107", - - // Attributes - "bold": "1", - "dim": "2", - "underline": "4", - "blink_slow": "5", - "blink_fast": "6", - "invert": "7", - "hidden": "8", - - // Reset to reset everything to their defaults - "reset": "0", - "reset_bold": "21", - } - - def = Colorize{ - Colors: DefaultColors, - Reset: true, - } -} - -var def Colorize -var parseReRaw = `\[[a-z0-9_-]+\]` -var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) -var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) - -// Print is a convenience wrapper for fmt.Print with support for color codes. 
-// -// Print formats using the default formats for its operands and writes to -// standard output with support for color codes. Spaces are added between -// operands when neither is a string. It returns the number of bytes written -// and any write error encountered. -func Print(a string) (n int, err error) { - return fmt.Print(Color(a)) -} - -// Println is a convenience wrapper for fmt.Println with support for color -// codes. -// -// Println formats using the default formats for its operands and writes to -// standard output with support for color codes. Spaces are always added -// between operands and a newline is appended. It returns the number of bytes -// written and any write error encountered. -func Println(a string) (n int, err error) { - return fmt.Println(Color(a)) -} - -// Printf is a convenience wrapper for fmt.Printf with support for color codes. -// -// Printf formats according to a format specifier and writes to standard output -// with support for color codes. It returns the number of bytes written and any -// write error encountered. -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(Color(format), a...) -} - -// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. -// -// Fprint formats using the default formats for its operands and writes to w -// with support for color codes. Spaces are added between operands when neither -// is a string. It returns the number of bytes written and any write error -// encountered. -func Fprint(w io.Writer, a string) (n int, err error) { - return fmt.Fprint(w, Color(a)) -} - -// Fprintln is a convenience wrapper for fmt.Fprintln with support for color -// codes. -// -// Fprintln formats using the default formats for its operands and writes to w -// with support for color codes. Spaces are always added between operands and a -// newline is appended. It returns the number of bytes written and any write -// error encountered. -func Fprintln(w io.Writer, a string) (n int, err error) { - return fmt.Fprintln(w, Color(a)) -} - -// Fprintf is a convenience wrapper for fmt.Fprintf with support for color -// codes. -// -// Fprintf formats according to a format specifier and writes to w with support -// for color codes. It returns the number of bytes written and any write error -// encountered. -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, Color(format), a...) -} diff --git a/vendor/github.com/mitchellh/colorstring/go.mod b/vendor/github.com/mitchellh/colorstring/go.mod deleted file mode 100644 index 446ff8d3..00000000 --- a/vendor/github.com/mitchellh/colorstring/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/colorstring diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go index 204afb42..b05a49a6 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -22,6 +22,7 @@ type T interface { Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -31,10 +32,12 @@ type T interface { // RuntimeT implements T and can be instantiated and run at runtime to // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. 
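The Parallel/Skip additions to go-testing-interface in this diff matter when RuntimeT drives test-style helpers outside of `go test`. A hedged sketch, assuming a made-up helper `checkRoleName` written against the interface:

```go
package main

import (
	"log"

	testing "github.com/mitchellh/go-testing-interface"
)

// checkRoleName is a hypothetical helper written against the interface so it
// can run both under `go test` and at runtime via RuntimeT.
func checkRoleName(t testing.T, name string) {
	if name == "" {
		t.Skip("no role name supplied")
	}
}

func main() {
	// Unlike *testing.T, RuntimeT panics on Fatal and records Skip instead
	// of stopping a goroutine.
	rt := &testing.RuntimeT{}
	checkRoleName(rt, "")
	log.Println("skipped:", rt.Skipped())
}
```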
For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. Name and Skip methods are -// unimplemented noops. +// list to determine whether to exit yourself. +// +// Parallel does not do anything. type RuntimeT struct { - failed bool + failed bool + skipped bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -77,8 +80,26 @@ func (t *RuntimeT) Logf(format string, args ...interface{}) { log.Println(fmt.Sprintf(format, args...)) } -func (t *RuntimeT) Name() string { return "" } -func (t *RuntimeT) Skip(args ...interface{}) {} -func (t *RuntimeT) SkipNow() {} -func (t *RuntimeT) Skipf(format string, args ...interface{}) {} -func (t *RuntimeT) Skipped() bool { return false } +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Parallel() {} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go index 31b42cad..f09c066a 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -27,6 +27,7 @@ type T interface { Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -38,9 +39,11 @@ type T interface { // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. For calls to Error, you'll have to check the errors // list to determine whether to exit yourself. +// +// Parallel does not do anything. type RuntimeT struct { - skipped bool failed bool + skipped bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -87,6 +90,8 @@ func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Parallel() {} + func (t *RuntimeT) Skip(args ...interface{}) { log.Print(args...) t.SkipNow() diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE deleted file mode 100644 index a3866a29..00000000 --- a/vendor/github.com/mitchellh/hashstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md deleted file mode 100644 index 28ce45a3..00000000 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) - -hashstructure is a Go library for creating a unique hash value -for arbitrary values in Go. - -This can be used to key values in a hash (for use in a map, set, etc.) -that are complex. The most common use case is comparing two values without -sending data across the network, caching values locally (de-dup), and so on. - -## Features - - * Hash any arbitrary Go value, including complex types. - - * Tag a struct field to ignore it and not affect the hash value. - - * Tag a slice type struct field to treat it as a set where ordering - doesn't affect the hash code but the field itself is still taken into - account to create the hash value. - - * Optionally specify a custom hash function to optimize for speed, collision - avoidance for your data set, etc. - - * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, - allowing effective hashing of time.Time - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/hashstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). - -A quick code example is shown below: - -```go -type ComplexStruct struct { - Name string - Age uint - Metadata map[string]interface{} -} - -v := ComplexStruct{ - Name: "mitchellh", - Age: 64, - Metadata: map[string]interface{}{ - "car": true, - "location": "California", - "siblings": []string{"Bob", "John"}, - }, -} - -hash, err := hashstructure.Hash(v, nil) -if err != nil { - panic(err) -} - -fmt.Printf("%d", hash) -// Output: -// 2307517237273902113 -``` diff --git a/vendor/github.com/mitchellh/hashstructure/go.mod b/vendor/github.com/mitchellh/hashstructure/go.mod deleted file mode 100644 index 966582aa..00000000 --- a/vendor/github.com/mitchellh/hashstructure/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/hashstructure diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go deleted file mode 100644 index ea13a158..00000000 --- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ /dev/null @@ -1,358 +0,0 @@ -package hashstructure - -import ( - "encoding/binary" - "fmt" - "hash" - "hash/fnv" - "reflect" -) - -// ErrNotStringer is returned when there's an error with hash:"string" -type ErrNotStringer struct { - Field string -} - -// Error implements error for ErrNotStringer -func (ens *ErrNotStringer) Error() string { - return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) -} - -// HashOptions are options that are available for hashing. -type HashOptions struct { - // Hasher is the hash function to use. If this isn't set, it will - // default to FNV. - Hasher hash.Hash64 - - // TagName is the struct tag to look at when hashing the structure. - // By default this is "hash". 
- TagName string - - // ZeroNil is flag determining if nil pointer should be treated equal - // to a zero value of pointed type. By default this is false. - ZeroNil bool -} - -// Hash returns the hash value of an arbitrary value. -// -// If opts is nil, then default options will be used. See HashOptions -// for the default values. The same *HashOptions value cannot be used -// concurrently. None of the values within a *HashOptions struct are -// safe to read/write while hashing is being done. -// -// Notes on the value: -// -// * Unexported fields on structs are ignored and do not affect the -// hash value. -// -// * Adding an exported field to a struct with the zero value will change -// the hash value. -// -// For structs, the hashing can be controlled using tags. For example: -// -// struct { -// Name string -// UUID string `hash:"ignore"` -// } -// -// The available tag values are: -// -// * "ignore" or "-" - The field will be ignored and not affect the hash code. -// -// * "set" - The field will be treated as a set, where ordering doesn't -// affect the hash code. This only works for slices. -// -// * "string" - The field will be hashed as a string, only works when the -// field implements fmt.Stringer -// -func Hash(v interface{}, opts *HashOptions) (uint64, error) { - // Create default options - if opts == nil { - opts = &HashOptions{} - } - if opts.Hasher == nil { - opts.Hasher = fnv.New64() - } - if opts.TagName == "" { - opts.TagName = "hash" - } - - // Reset the hash - opts.Hasher.Reset() - - // Create our walker and walk the structure - w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, - } - return w.visit(reflect.ValueOf(v), nil) -} - -type walker struct { - h hash.Hash64 - tag string - zeronil bool -} - -type visitOpts struct { - // Flags are a bitmask of flags to affect behavior of this visit - Flags visitFlag - - // Information about the struct containing this field - Struct interface{} - StructField string -} - -func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { - t := reflect.TypeOf(0) - - // Loop since these can be wrapped in multiple layers of pointers - // and interfaces. - for { - // If we have an interface, dereference it. We have to do this up - // here because it might be a nil in there and the check below must - // catch that. - if v.Kind() == reflect.Interface { - v = v.Elem() - continue - } - - if v.Kind() == reflect.Ptr { - if w.zeronil { - t = v.Type().Elem() - } - v = reflect.Indirect(v) - continue - } - - break - } - - // If it is nil, treat it like a zero. - if !v.IsValid() { - v = reflect.Zero(t) - } - - // Binary writing can use raw ints, we have to convert to - // a sized-int, we'll choose the largest... 
- switch v.Kind() { - case reflect.Int: - v = reflect.ValueOf(int64(v.Int())) - case reflect.Uint: - v = reflect.ValueOf(uint64(v.Uint())) - case reflect.Bool: - var tmp int8 - if v.Bool() { - tmp = 1 - } - v = reflect.ValueOf(tmp) - } - - k := v.Kind() - - // We can shortcut numeric values by directly binary writing them - if k >= reflect.Int && k <= reflect.Complex64 { - // A direct hash calculation - w.h.Reset() - err := binary.Write(w.h, binary.LittleEndian, v.Interface()) - return w.h.Sum64(), err - } - - switch k { - case reflect.Array: - var h uint64 - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - h = hashUpdateOrdered(w.h, h, current) - } - - return h, nil - - case reflect.Map: - var includeMap IncludableMap - if opts != nil && opts.Struct != nil { - if v, ok := opts.Struct.(IncludableMap); ok { - includeMap = v - } - } - - // Build the hash for the map. We do this by XOR-ing all the key - // and value hashes. This makes it deterministic despite ordering. - var h uint64 - for _, k := range v.MapKeys() { - v := v.MapIndex(k) - if includeMap != nil { - incl, err := includeMap.HashIncludeMap( - opts.StructField, k.Interface(), v.Interface()) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - kh, err := w.visit(k, nil) - if err != nil { - return 0, err - } - vh, err := w.visit(v, nil) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - - return h, nil - - case reflect.Struct: - parent := v.Interface() - var include Includable - if impl, ok := parent.(Includable); ok { - include = impl - } - - t := v.Type() - h, err := w.visit(reflect.ValueOf(t.Name()), nil) - if err != nil { - return 0, err - } - - l := v.NumField() - for i := 0; i < l; i++ { - if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - var f visitFlag - fieldType := t.Field(i) - if fieldType.PkgPath != "" { - // Unexported - continue - } - - tag := fieldType.Tag.Get(w.tag) - if tag == "ignore" || tag == "-" { - // Ignore this field - continue - } - - // if string is set, use the string value - if tag == "string" { - if impl, ok := innerV.Interface().(fmt.Stringer); ok { - innerV = reflect.ValueOf(impl.String()) - } else { - return 0, &ErrNotStringer{ - Field: v.Type().Field(i).Name, - } - } - } - - // Check if we implement includable and check it - if include != nil { - incl, err := include.HashInclude(fieldType.Name, innerV) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - switch tag { - case "set": - f |= visitFlagSet - } - - kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) - if err != nil { - return 0, err - } - - vh, err := w.visit(innerV, &visitOpts{ - Flags: f, - Struct: parent, - StructField: fieldType.Name, - }) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - } - - return h, nil - - case reflect.Slice: - // We have two behaviors here. If it isn't a set, then we just - // visit all the elements. If it is a set, then we do a deterministic - // hash code. 
- var h uint64 - var set bool - if opts != nil { - set = (opts.Flags & visitFlagSet) != 0 - } - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - if set { - h = hashUpdateUnordered(h, current) - } else { - h = hashUpdateOrdered(w.h, h, current) - } - } - - return h, nil - - case reflect.String: - // Directly hash - w.h.Reset() - _, err := w.h.Write([]byte(v.String())) - return w.h.Sum64(), err - - default: - return 0, fmt.Errorf("unknown kind to hash: %s", k) - } - -} - -func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { - // For ordered updates, use a real hash function - h.Reset() - - // We just panic if the binary writes fail because we are writing - // an int64 which should never be fail-able. - e1 := binary.Write(h, binary.LittleEndian, a) - e2 := binary.Write(h, binary.LittleEndian, b) - if e1 != nil { - panic(e1) - } - if e2 != nil { - panic(e2) - } - - return h.Sum64() -} - -func hashUpdateUnordered(a, b uint64) uint64 { - return a ^ b -} - -// visitFlag is used as a bitmask for affecting visit behavior -type visitFlag uint - -const ( - visitFlagInvalid visitFlag = iota - visitFlagSet = iota << 1 -) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go deleted file mode 100644 index b6289c0b..00000000 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ /dev/null @@ -1,15 +0,0 @@ -package hashstructure - -// Includable is an interface that can optionally be implemented by -// a struct. It will be called for each field in the struct to check whether -// it should be included in the hash. -type Includable interface { - HashInclude(field string, v interface{}) (bool, error) -} - -// IncludableMap is an interface that can optionally be implemented by -// a struct. It will be called when a map-type field is found to ask the -// struct if the map item should be included in the hash. -type IncludableMap interface { - HashIncludeMap(field string, k, v interface{}) (bool, error) -} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go index d7ab7b6d..3a93a0b1 100644 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -230,7 +230,8 @@ func walkMap(v reflect.Value, w interface{}) error { ew.Enter(MapValue) } - if err := walk(kv, w); err != nil { + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { return err } diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore deleted file mode 100644 index 293955f9..00000000 --- a/vendor/github.com/posener/complete/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.idea -coverage.txt -gocomplete/gocomplete -example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml deleted file mode 100644 index 2fae9454..00000000 --- a/vendor/github.com/posener/complete/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.11 - - 1.10.x - - 1.9 - - 1.8 - -before_install: - - go get -u -t ./... 
- -script: - - GO111MODULE=on ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt deleted file mode 100644 index 16249b4a..00000000 --- a/vendor/github.com/posener/complete/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2017 Eyal Posener - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go deleted file mode 100644 index 17ab2c6d..00000000 --- a/vendor/github.com/posener/complete/args.go +++ /dev/null @@ -1,111 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" - "strings" - "unicode" -) - -// Args describes command line arguments -type Args struct { - // All lists of all arguments in command line (not including the command itself) - All []string - // Completed lists of all completed arguments in command line, - // If the last one is still being typed - no space after it, - // it won't appear in this list of arguments. - Completed []string - // Last argument in command line, the one being typed, if the last - // character in the command line is a space, this argument will be empty, - // otherwise this would be the last word. - Last string - // LastCompleted is the last argument that was fully typed. - // If the last character in the command line is space, this would be the - // last word, otherwise, it would be the word before that. - LastCompleted string -} - -// Directory gives the directory of the current written -// last argument if it represents a file name being written. -// in case that it is not, we fall back to the current directory. -func (a Args) Directory() string { - if info, err := os.Stat(a.Last); err == nil && info.IsDir() { - return fixPathForm(a.Last, a.Last) - } - dir := filepath.Dir(a.Last) - if info, err := os.Stat(dir); err != nil || !info.IsDir() { - return "./" - } - return fixPathForm(a.Last, dir) -} - -func newArgs(line string) Args { - var ( - all []string - completed []string - ) - parts := splitFields(line) - if len(parts) > 0 { - all = parts[1:] - completed = removeLast(parts[1:]) - } - return Args{ - All: all, - Completed: completed, - Last: last(parts), - LastCompleted: last(completed), - } -} - -// splitFields returns a list of fields from the given command line. -// If the last character is space, it appends an empty field in the end -// indicating that the field before it was completed. 
-// If the last field is of the form "a=b", it splits it to two fields: "a", "b", -// So it can be completed. -func splitFields(line string) []string { - parts := strings.Fields(line) - - // Add empty field if the last field was completed. - if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { - parts = append(parts, "") - } - - // Treat the last field if it is of the form "a=b" - parts = splitLastEqual(parts) - return parts -} - -func splitLastEqual(line []string) []string { - if len(line) == 0 { - return line - } - parts := strings.Split(line[len(line)-1], "=") - return append(line[:len(line)-1], parts...) -} - -func (a Args) from(i int) Args { - if i > len(a.All) { - i = len(a.All) - } - a.All = a.All[i:] - - if i > len(a.Completed) { - i = len(a.Completed) - } - a.Completed = a.Completed[i:] - return a -} - -func removeLast(a []string) []string { - if len(a) > 0 { - return a[:len(a)-1] - } - return a -} - -func last(args []string) string { - if len(args) == 0 { - return "" - } - return args[len(args)-1] -} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go deleted file mode 100644 index b99fe529..00000000 --- a/vendor/github.com/posener/complete/cmd/cmd.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package cmd used for command line options for the complete tool -package cmd - -import ( - "errors" - "flag" - "fmt" - "os" - "strings" - - "github.com/posener/complete/cmd/install" -) - -// CLI for command line -type CLI struct { - Name string - InstallName string - UninstallName string - - install bool - uninstall bool - yes bool -} - -const ( - defaultInstallName = "install" - defaultUninstallName = "uninstall" -) - -// Run is used when running complete in command line mode. -// this is used when the complete is not completing words, but to -// install it or uninstall it. -func (f *CLI) Run() bool { - err := f.validate() - if err != nil { - os.Stderr.WriteString(err.Error() + "\n") - os.Exit(1) - } - - switch { - case f.install: - f.prompt() - err = install.Install(f.Name) - case f.uninstall: - f.prompt() - err = install.Uninstall(f.Name) - default: - // non of the action flags matched, - // returning false should make the real program execute - return false - } - - if err != nil { - fmt.Printf("%s failed! %s\n", f.action(), err) - os.Exit(3) - } - fmt.Println("Done!") - return true -} - -// prompt use for approval -// exit if approval was not given -func (f *CLI) prompt() { - defer fmt.Println(f.action() + "ing...") - if f.yes { - return - } - fmt.Printf("%s completion for %s? ", f.action(), f.Name) - var answer string - fmt.Scanln(&answer) - - switch strings.ToLower(answer) { - case "y", "yes": - return - default: - fmt.Println("Cancelling...") - os.Exit(1) - } -} - -// AddFlags adds the CLI flags to the flag set. -// If flags is nil, the default command line flags will be taken. -// Pass non-empty strings as installName and uninstallName to override the default -// flag names. 
-func (f *CLI) AddFlags(flags *flag.FlagSet) { - if flags == nil { - flags = flag.CommandLine - } - - if f.InstallName == "" { - f.InstallName = defaultInstallName - } - if f.UninstallName == "" { - f.UninstallName = defaultUninstallName - } - - if flags.Lookup(f.InstallName) == nil { - flags.BoolVar(&f.install, f.InstallName, false, - fmt.Sprintf("Install completion for %s command", f.Name)) - } - if flags.Lookup(f.UninstallName) == nil { - flags.BoolVar(&f.uninstall, f.UninstallName, false, - fmt.Sprintf("Uninstall completion for %s command", f.Name)) - } - if flags.Lookup("y") == nil { - flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") - } -} - -// validate the CLI -func (f *CLI) validate() error { - if f.install && f.uninstall { - return errors.New("Install and uninstall are mutually exclusive") - } - return nil -} - -// action name according to the CLI values. -func (f *CLI) action() string { - switch { - case f.install: - return "Install" - case f.uninstall: - return "Uninstall" - default: - return "unknown" - } -} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go deleted file mode 100644 index a287f998..00000000 --- a/vendor/github.com/posener/complete/cmd/install/bash.go +++ /dev/null @@ -1,32 +0,0 @@ -package install - -import "fmt" - -// (un)install in bash -// basically adds/remove from .bashrc: -// -// complete -C -type bash struct { - rc string -} - -func (b bash) Install(cmd, bin string) error { - completeCmd := b.cmd(cmd, bin) - if lineInFile(b.rc, completeCmd) { - return fmt.Errorf("already installed in %s", b.rc) - } - return appendToFile(b.rc, completeCmd) -} - -func (b bash) Uninstall(cmd, bin string) error { - completeCmd := b.cmd(cmd, bin) - if !lineInFile(b.rc, completeCmd) { - return fmt.Errorf("does not installed in %s", b.rc) - } - - return removeFromFile(b.rc, completeCmd) -} - -func (bash) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go deleted file mode 100644 index 6467196b..00000000 --- a/vendor/github.com/posener/complete/cmd/install/fish.go +++ /dev/null @@ -1,56 +0,0 @@ -package install - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "text/template" -) - -// (un)install in fish - -type fish struct { - configDir string -} - -func (f fish) Install(cmd, bin string) error { - completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) - completeCmd, err := f.cmd(cmd, bin) - if err != nil { - return err - } - if _, err := os.Stat(completionFile); err == nil { - return fmt.Errorf("already installed at %s", completionFile) - } - - return createFile(completionFile, completeCmd) -} - -func (f fish) Uninstall(cmd, bin string) error { - completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) - if _, err := os.Stat(completionFile); err != nil { - return fmt.Errorf("does not installed in %s", f.configDir) - } - - return os.Remove(completionFile) -} - -func (f fish) cmd(cmd, bin string) (string, error) { - var buf bytes.Buffer - params := struct{ Cmd, Bin string }{cmd, bin} - tmpl := template.Must(template.New("cmd").Parse(` -function __complete_{{.Cmd}} - set -lx COMP_LINE (string join ' ' (commandline -o)) - test (commandline -ct) = "" - and set COMP_LINE "$COMP_LINE " - {{.Bin}} -end -complete -c {{.Cmd}} -a 
"(__complete_{{.Cmd}})" -`)) - err := tmpl.Execute(&buf, params) - if err != nil { - return "", err - } - return buf.String(), nil -} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go deleted file mode 100644 index dfa1963b..00000000 --- a/vendor/github.com/posener/complete/cmd/install/install.go +++ /dev/null @@ -1,119 +0,0 @@ -package install - -import ( - "errors" - "os" - "os/user" - "path/filepath" - - "github.com/hashicorp/go-multierror" -) - -type installer interface { - Install(cmd, bin string) error - Uninstall(cmd, bin string) error -} - -// Install complete command given: -// cmd: is the command name -func Install(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not find any shells to install") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Install(cmd, bin) - if errI != nil { - err = multierror.Append(err, errI) - } - } - - return err -} - -// Uninstall complete command given: -// cmd: is the command name -func Uninstall(cmd string) error { - is := installers() - if len(is) == 0 { - return errors.New("Did not find any shells to uninstall") - } - bin, err := getBinaryPath() - if err != nil { - return err - } - - for _, i := range is { - errI := i.Uninstall(cmd, bin) - if errI != nil { - err = multierror.Append(err, errI) - } - } - - return err -} - -func installers() (i []installer) { - for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} { - if f := rcFile(rc); f != "" { - i = append(i, bash{f}) - break - } - } - if f := rcFile(".zshrc"); f != "" { - i = append(i, zsh{f}) - } - if d := fishConfigDir(); d != "" { - i = append(i, fish{d}) - } - return -} - -func fishConfigDir() string { - configDir := filepath.Join(getConfigHomePath(), "fish") - if configDir == "" { - return "" - } - if info, err := os.Stat(configDir); err != nil || !info.IsDir() { - return "" - } - return configDir -} - -func getConfigHomePath() string { - u, err := user.Current() - if err != nil { - return "" - } - - configHome := os.Getenv("XDG_CONFIG_HOME") - if configHome == "" { - return filepath.Join(u.HomeDir, ".config") - } - return configHome -} - -func getBinaryPath() (string, error) { - bin, err := os.Executable() - if err != nil { - return "", err - } - return filepath.Abs(bin) -} - -func rcFile(name string) string { - u, err := user.Current() - if err != nil { - return "" - } - path := filepath.Join(u.HomeDir, name) - if _, err := os.Stat(path); err != nil { - return "" - } - return path -} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go deleted file mode 100644 index d34ac8ca..00000000 --- a/vendor/github.com/posener/complete/cmd/install/utils.go +++ /dev/null @@ -1,140 +0,0 @@ -package install - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -func lineInFile(name string, lookFor string) bool { - f, err := os.Open(name) - if err != nil { - return false - } - defer f.Close() - r := bufio.NewReader(f) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - return false - } - if err != nil { - return false - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) 
- if string(line) == lookFor { - return true - } - prefix = prefix[:0] - } -} - -func createFile(name string, content string) error { - // make sure file directory exists - if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { - return err - } - - // create the file - f, err := os.Create(name) - if err != nil { - return err - } - defer f.Close() - - // write file content - _, err = f.WriteString(fmt.Sprintf("%s\n", content)) - return err -} - -func appendToFile(name string, content string) error { - f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) - if err != nil { - return err - } - defer f.Close() - _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) - return err -} - -func removeFromFile(name string, content string) error { - backup := name + ".bck" - err := copyFile(name, backup) - if err != nil { - return err - } - temp, err := removeContentToTempFile(name, content) - if err != nil { - return err - } - - err = copyFile(temp, name) - if err != nil { - return err - } - - return os.Remove(backup) -} - -func removeContentToTempFile(name, content string) (string, error) { - rf, err := os.Open(name) - if err != nil { - return "", err - } - defer rf.Close() - wf, err := ioutil.TempFile("/tmp", "complete-") - if err != nil { - return "", err - } - defer wf.Close() - - r := bufio.NewReader(rf) - prefix := []byte{} - for { - line, isPrefix, err := r.ReadLine() - if err == io.EOF { - break - } - if err != nil { - return "", err - } - if isPrefix { - prefix = append(prefix, line...) - continue - } - line = append(prefix, line...) - str := string(line) - if str == content { - continue - } - _, err = wf.WriteString(str + "\n") - if err != nil { - return "", err - } - prefix = prefix[:0] - } - return wf.Name(), nil -} - -func copyFile(src string, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return err - } - defer out.Close() - _, err = io.Copy(out, in) - return err -} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go deleted file mode 100644 index a625f53c..00000000 --- a/vendor/github.com/posener/complete/cmd/install/zsh.go +++ /dev/null @@ -1,39 +0,0 @@ -package install - -import "fmt" - -// (un)install in zsh -// basically adds/remove from .zshrc: -// -// autoload -U +X bashcompinit && bashcompinit" -// complete -C -type zsh struct { - rc string -} - -func (z zsh) Install(cmd, bin string) error { - completeCmd := z.cmd(cmd, bin) - if lineInFile(z.rc, completeCmd) { - return fmt.Errorf("already installed in %s", z.rc) - } - - bashCompInit := "autoload -U +X bashcompinit && bashcompinit" - if !lineInFile(z.rc, bashCompInit) { - completeCmd = bashCompInit + "\n" + completeCmd - } - - return appendToFile(z.rc, completeCmd) -} - -func (z zsh) Uninstall(cmd, bin string) error { - completeCmd := z.cmd(cmd, bin) - if !lineInFile(z.rc, completeCmd) { - return fmt.Errorf("does not installed in %s", z.rc) - } - - return removeFromFile(z.rc, completeCmd) -} - -func (zsh) cmd(cmd, bin string) string { - return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) -} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go deleted file mode 100644 index 82d37d52..00000000 --- a/vendor/github.com/posener/complete/command.go +++ /dev/null @@ -1,111 +0,0 @@ -package complete - -// Command represents a command line -// It holds the data that enables auto completion of command 
line -// Command can also be a sub command. -type Command struct { - // Sub is map of sub commands of the current command - // The key refer to the sub command name, and the value is it's - // Command descriptive struct. - Sub Commands - - // Flags is a map of flags that the command accepts. - // The key is the flag name, and the value is it's predictions. - Flags Flags - - // GlobalFlags is a map of flags that the command accepts. - // Global flags that can appear also after a sub command. - GlobalFlags Flags - - // Args are extra arguments that the command accepts, those who are - // given without any flag before. - Args Predictor -} - -// Predict returns all possible predictions for args according to the command struct -func (c *Command) Predict(a Args) []string { - options, _ := c.predict(a) - return options -} - -// Commands is the type of Sub member, it maps a command name to a command struct -type Commands map[string]Command - -// Predict completion of sub command names names according to command line arguments -func (c Commands) Predict(a Args) (prediction []string) { - for sub := range c { - prediction = append(prediction, sub) - } - return -} - -// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. -type Flags map[string]Predictor - -// Predict completion of flags names according to command line arguments -func (f Flags) Predict(a Args) (prediction []string) { - for flag := range f { - // If the flag starts with a hyphen, we avoid emitting the prediction - // unless the last typed arg contains a hyphen as well. - flagHyphenStart := len(flag) != 0 && flag[0] == '-' - lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' - if flagHyphenStart && !lastHyphenStart { - continue - } - prediction = append(prediction, flag) - } - return -} - -// predict options -// only is set to true if no more options are allowed to be returned -// those are in cases of special flag that has specific completion arguments, -// and other flags or sub commands can't come after it. -func (c *Command) predict(a Args) (options []string, only bool) { - - // search sub commands for predictions first - subCommandFound := false - for i, arg := range a.Completed { - if cmd, ok := c.Sub[arg]; ok { - subCommandFound = true - - // recursive call for sub command - options, only = cmd.predict(a.from(i)) - if only { - return - } - - // We matched so stop searching. Continuing to search can accidentally - // match a subcommand with current set of commands, see issue #46. - break - } - } - - // if last completed word is a global flag that we need to complete - if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to global flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.GlobalFlags.Predict(a)...) - - // if a sub command was entered, we won't add the parent command - // completions and we return here. - if subCommandFound { - return - } - - // if last completed word is a command flag that we need to complete - if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { - Log("Predicting according to flag %s", a.LastCompleted) - return predictor.Predict(a), true - } - - options = append(options, c.Sub.Predict(a)...) - options = append(options, c.Flags.Predict(a)...) - if c.Args != nil { - options = append(options, c.Args.Predict(a)...) 
- } - - return -} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go deleted file mode 100644 index 725c4deb..00000000 --- a/vendor/github.com/posener/complete/complete.go +++ /dev/null @@ -1,109 +0,0 @@ -// Package complete provides a tool for bash writing bash completion in go. -// -// Writing bash completion scripts is a hard work. This package provides an easy way -// to create bash completion scripts for any command, and also an easy way to install/uninstall -// the completion of the command. -package complete - -import ( - "flag" - "fmt" - "io" - "os" - "strconv" - - "github.com/posener/complete/cmd" - "github.com/posener/complete/match" -) - -const ( - envLine = "COMP_LINE" - envPoint = "COMP_POINT" - envDebug = "COMP_DEBUG" -) - -// Complete structs define completion for a command with CLI options -type Complete struct { - Command Command - cmd.CLI - Out io.Writer -} - -// New creates a new complete command. -// name is the name of command we want to auto complete. -// IMPORTANT: it must be the same name - if the auto complete -// completes the 'go' command, name must be equal to "go". -// command is the struct of the command completion. -func New(name string, command Command) *Complete { - return &Complete{ - Command: command, - CLI: cmd.CLI{Name: name}, - Out: os.Stdout, - } -} - -// Run runs the completion and add installation flags beforehand. -// The flags are added to the main flag CommandLine variable. -func (c *Complete) Run() bool { - c.AddFlags(nil) - flag.Parse() - return c.Complete() -} - -// Complete a command from completion line in environment variable, -// and print out the complete options. -// returns success if the completion ran or if the cli matched -// any of the given flags, false otherwise -// For installation: it assumes that flags were added and parsed before -// it was called. -func (c *Complete) Complete() bool { - line, point, ok := getEnv() - if !ok { - // make sure flags parsed, - // in case they were not added in the main program - return c.CLI.Run() - } - - if point >= 0 && point < len(line) { - line = line[:point] - } - - Log("Completing phrase: %s", line) - a := newArgs(line) - Log("Completing last field: %s", a.Last) - options := c.Command.Predict(a) - Log("Options: %s", options) - - // filter only options that match the last argument - matches := []string{} - for _, option := range options { - if match.Prefix(option, a.Last) { - matches = append(matches, option) - } - } - Log("Matches: %s", matches) - c.output(matches) - return true -} - -func getEnv() (line string, point int, ok bool) { - line = os.Getenv(envLine) - if line == "" { - return - } - point, err := strconv.Atoi(os.Getenv(envPoint)) - if err != nil { - // If failed parsing point for some reason, set it to point - // on the end of the line. 
- Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) - point = len(line) - } - return line, point, true -} - -func (c *Complete) output(options []string) { - // stdout of program defines the complete options - for _, option := range options { - fmt.Fprintln(c.Out, option) - } -} diff --git a/vendor/github.com/posener/complete/go.mod b/vendor/github.com/posener/complete/go.mod deleted file mode 100644 index fef0c440..00000000 --- a/vendor/github.com/posener/complete/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/posener/complete - -require github.com/hashicorp/go-multierror v1.0.0 diff --git a/vendor/github.com/posener/complete/go.sum b/vendor/github.com/posener/complete/go.sum deleted file mode 100644 index d2f13301..00000000 --- a/vendor/github.com/posener/complete/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go deleted file mode 100644 index c3029556..00000000 --- a/vendor/github.com/posener/complete/log.go +++ /dev/null @@ -1,22 +0,0 @@ -package complete - -import ( - "io/ioutil" - "log" - "os" -) - -// Log is used for debugging purposes -// since complete is running on tab completion, it is nice to -// have logs to the stderr (when writing your own completer) -// to write logs, set the COMP_DEBUG environment variable and -// use complete.Log in the complete program -var Log = getLogger() - -func getLogger() func(format string, args ...interface{}) { - var logfile = ioutil.Discard - if os.Getenv(envDebug) != "" { - logfile = os.Stderr - } - return log.New(logfile, "complete ", log.Flags()).Printf -} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go deleted file mode 100644 index 051171e8..00000000 --- a/vendor/github.com/posener/complete/match/file.go +++ /dev/null @@ -1,19 +0,0 @@ -package match - -import "strings" - -// File returns true if prefix can match the file -func File(file, prefix string) bool { - // special case for current directory completion - if file == "./" && (prefix == "." || prefix == "") { - return true - } - if prefix == "." && strings.HasPrefix(file, ".") { - return true - } - - file = strings.TrimPrefix(file, "./") - prefix = strings.TrimPrefix(prefix, "./") - - return strings.HasPrefix(file, prefix) -} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go deleted file mode 100644 index 812fcac9..00000000 --- a/vendor/github.com/posener/complete/match/match.go +++ /dev/null @@ -1,6 +0,0 @@ -package match - -// Match matches two strings -// it is used for comparing a term to the last typed -// word, the prefix, and see if it is a possible auto complete option. 
-type Match func(term, prefix string) bool diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go deleted file mode 100644 index 9a01ba63..00000000 --- a/vendor/github.com/posener/complete/match/prefix.go +++ /dev/null @@ -1,9 +0,0 @@ -package match - -import "strings" - -// Prefix is a simple Matcher, if the word is it's prefix, there is a match -// Match returns true if a has the prefix as prefix -func Prefix(long, prefix string) bool { - return strings.HasPrefix(long, prefix) -} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go deleted file mode 100644 index 82070632..00000000 --- a/vendor/github.com/posener/complete/predict.go +++ /dev/null @@ -1,41 +0,0 @@ -package complete - -// Predictor implements a predict method, in which given -// command line arguments returns a list of options it predicts. -type Predictor interface { - Predict(Args) []string -} - -// PredictOr unions two predicate functions, so that the result predicate -// returns the union of their predication -func PredictOr(predictors ...Predictor) Predictor { - return PredictFunc(func(a Args) (prediction []string) { - for _, p := range predictors { - if p == nil { - continue - } - prediction = append(prediction, p.Predict(a)...) - } - return - }) -} - -// PredictFunc determines what terms can follow a command or a flag -// It is used for auto completion, given last - the last word in the already -// in the command line, what words can complete it. -type PredictFunc func(Args) []string - -// Predict invokes the predict function and implements the Predictor interface -func (p PredictFunc) Predict(a Args) []string { - if p == nil { - return nil - } - return p(a) -} - -// PredictNothing does not expect anything after. -var PredictNothing Predictor - -// PredictAnything expects something, but nothing particular, such as a number -// or arbitrary name. -var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go deleted file mode 100644 index c8adf7e8..00000000 --- a/vendor/github.com/posener/complete/predict_files.go +++ /dev/null @@ -1,108 +0,0 @@ -package complete - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/posener/complete/match" -) - -// PredictDirs will search for directories in the given started to be typed -// path, if no path was started to be typed, it will complete to directories -// in the current working directory. -func PredictDirs(pattern string) Predictor { - return files(pattern, false) -} - -// PredictFiles will search for files matching the given pattern in the started to -// be typed path, if no path was started to be typed, it will complete to files that -// match the pattern in the current working directory. -// To match any file, use "*" as pattern. To match go files use "*.go", and so on. -func PredictFiles(pattern string) Predictor { - return files(pattern, true) -} - -func files(pattern string, allowFiles bool) PredictFunc { - - // search for files according to arguments, - // if only one directory has matched the result, search recursively into - // this directory to give more results. - return func(a Args) (prediction []string) { - prediction = predictFiles(a, pattern, allowFiles) - - // if the number of prediction is not 1, we either have many results or - // have no results, so we return it. 
- if len(prediction) != 1 { - return - } - - // only try deeper, if the one item is a directory - if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { - return - } - - a.Last = prediction[0] - return predictFiles(a, pattern, allowFiles) - } -} - -func predictFiles(a Args, pattern string, allowFiles bool) []string { - if strings.HasSuffix(a.Last, "/..") { - return nil - } - - dir := a.Directory() - files := listFiles(dir, pattern, allowFiles) - - // add dir if match - files = append(files, dir) - - return PredictFilesSet(files).Predict(a) -} - -// PredictFilesSet predict according to file rules to a given set of file names -func PredictFilesSet(files []string) PredictFunc { - return func(a Args) (prediction []string) { - // add all matching files to prediction - for _, f := range files { - f = fixPathForm(a.Last, f) - - // test matching of file to the argument - if match.File(f, a.Last) { - prediction = append(prediction, f) - } - } - return - } -} - -func listFiles(dir, pattern string, allowFiles bool) []string { - // set of all file names - m := map[string]bool{} - - // list files - if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { - for _, f := range files { - if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { - m[f] = true - } - } - } - - // list directories - if dirs, err := ioutil.ReadDir(dir); err == nil { - for _, d := range dirs { - if d.IsDir() { - m[filepath.Join(dir, d.Name())] = true - } - } - } - - list := make([]string, 0, len(m)) - for k := range m { - list = append(list, k) - } - return list -} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go deleted file mode 100644 index fa4a34ae..00000000 --- a/vendor/github.com/posener/complete/predict_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package complete - -// PredictSet expects specific set of terms, given in the options argument. -func PredictSet(options ...string) Predictor { - return predictSet(options) -} - -type predictSet []string - -func (p predictSet) Predict(a Args) []string { - return p -} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md deleted file mode 100644 index 6d757ef8..00000000 --- a/vendor/github.com/posener/complete/readme.md +++ /dev/null @@ -1,118 +0,0 @@ -# complete - -A tool for bash writing bash completion in go, and bash completion for the go command line. - -[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) -[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) -[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete) -[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) -[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) - -Writing bash completion scripts is a hard work. This package provides an easy way -to create bash completion scripts for any command, and also an easy way to install/uninstall -the completion of the command. - -## go command bash completion - -In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line. 
- -This is an example that uses the `complete` package on the `go` command - the `complete` package -can also be used to implement any completions, see [Usage](#usage). - -### Install - -1. Type in your shell: -``` -go get -u github.com/posener/complete/gocomplete -gocomplete -install -``` - -2. Restart your shell - -Uninstall by `gocomplete -uninstall` - -### Features - -- Complete `go` command, including sub commands and all flags. -- Complete packages names or `.go` files when necessary. -- Complete test names after `-run` flag. - -## complete package - -Supported shells: - -- [x] bash -- [x] zsh -- [x] fish - -### Usage - -Assuming you have program called `run` and you want to have bash completion -for it, meaning, if you type `run` then space, then press the `Tab` key, -the shell will suggest relevant complete options. - -In that case, we will create a program called `runcomplete`, a go program, -with a `func main()` and so, that will make the completion of the `run` -program. Once the `runcomplete` will be in a binary form, we could -`runcomplete -install` and that will add to our shell all the bash completion -options for `run`. - -So here it is: - -```go -import "github.com/posener/complete" - -func main() { - - // create a Command object, that represents the command we want - // to complete. - run := complete.Command{ - - // Sub defines a list of sub commands of the program, - // this is recursive, since every command is of type command also. - Sub: complete.Commands{ - - // add a build sub command - "build": complete.Command { - - // define flags of the build sub command - Flags: complete.Flags{ - // build sub command has a flag '-cpus', which - // expects number of cpus after it. in that case - // anything could complete this flag. - "-cpus": complete.PredictAnything, - }, - }, - }, - - // define flags of the 'run' main command - Flags: complete.Flags{ - // a flag -o, which expects a file ending with .out after - // it, the tab completion will auto complete for files matching - // the given pattern. - "-o": complete.PredictFiles("*.out"), - }, - - // define global flags of the 'run' main command - // those will show up also when a sub command was entered in the - // command line - GlobalFlags: complete.Flags{ - - // a flag '-h' which does not expects anything after it - "-h": complete.PredictNothing, - }, - } - - // run the command completion, as part of the main() function. - // this triggers the autocompletion when needed. - // name must be exactly as the binary that we want to complete. - complete.New("run", run).Run() -} -``` - -### Self completing program - -In case that the program that we want to complete is written in go we -can make it self completing. - -Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh deleted file mode 100644 index 56bfcf15..00000000 --- a/vendor/github.com/posener/complete/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... 
| grep -v vendor); do - go test -v -race -coverprofile=profile.out -covermode=atomic $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go deleted file mode 100644 index 58b8b792..00000000 --- a/vendor/github.com/posener/complete/utils.go +++ /dev/null @@ -1,46 +0,0 @@ -package complete - -import ( - "os" - "path/filepath" - "strings" -) - -// fixPathForm changes a file name to a relative name -func fixPathForm(last string, file string) string { - // get wording directory for relative name - workDir, err := os.Getwd() - if err != nil { - return file - } - - abs, err := filepath.Abs(file) - if err != nil { - return file - } - - // if last is absolute, return path as absolute - if filepath.IsAbs(last) { - return fixDirPath(abs) - } - - rel, err := filepath.Rel(workDir, abs) - if err != nil { - return file - } - - // fix ./ prefix of path - if rel != "." && strings.HasPrefix(last, ".") { - rel = "./" + rel - } - - return fixDirPath(rel) -} - -func fixDirPath(path string) string { - info, err := os.Stat(path) - if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { - path += "/" - } - return path -} diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index 0637db72..00000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 0c9b04b5..00000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,452 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -for interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. - -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability.
- - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware - - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrap the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a pointer to a filesystem. -```go -var AppFs = afero.NewMemMapFs() - -or - -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if my application before had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: -```go -AppFs.Open("/tmp/foo") -``` - -`AppFs` being the variable we defined above. - - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. - -The afero utilities support all afero compatible backends.
- -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) - -They are available under two different approaches. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs, "", "ioutil-test") - -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs(); during testing you -can set it to afero.NewMemMapFs(). - -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that tests are order -independent, with no test relying on the state left by an earlier test. - -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} -``` - -# Available Backends - -## Operating System Native - -### OsFs - -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed.
- -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) -``` - -## Memory Backed Storage - -### MemMapFs - -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk I/O when persistence isn’t -necessary. It is fully concurrent and will work within goroutines -safely. - -```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` - -#### InMemoryFile - -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. - -## Network Interfaces - -### SftpFs - -Afero has experimental support for the secure file transfer protocol (SFTP), which can -be used to perform file operations over an encrypted channel. - -## Filtering Backends - -### BasePathFs - -The BasePathFs restricts all operations to a given path within an Fs. -The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. - -```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` - -### ReadOnlyFs - -A thin wrapper around the source Fs providing a read only view. - -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM -``` - -### RegexpFs - -A filtered view on file names, any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. - -```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` - -### HttpFs - -Afero provides an http compatible backend which can wrap any of the existing -backends. - -The Http package requires a slightly specific version of Open which -returns an http.File type. - -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. - -```go -httpFs := afero.NewHttpFs() -fileserver := http.FileServer(httpFs.Dir()) -http.Handle("/", fileserver) -``` - -## Composite Backends - -Afero provides the ability to have two (or more) filesystems act as a single -file system. - -### CacheOnReadFs - -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly, provided the request is within the cache duration of when it was -created in the overlay. - -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls on open file -handles, such as `Write()` or `Truncate()`, also go to the base first and then to the overlay. - -To write files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). - -Files are cached in the layer for the given time.Duration; a cache duration of 0 -means "forever", meaning the file will never be re-requested from the base. - -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. - -```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) -``` - -### CopyOnWriteFs() - -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top.
- -Read operations will first look in the overlay and if not found there, will -serve the file from the base. - -Changes to the file system will only be made in the overlay. - -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). - -Removing and Renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. - -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) - - fh, _ = ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` - -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. - - -## Desired/possible backends - -The following is a short list of possible backends we hope someone will -implement: - -* SSH -* ZIP -* TAR -* S3 - -# About the project - -## What's in the name - -Afero comes from the latin roots Ad-Facere. - -**"Ad"** is a prefix meaning "to". - -**"Facere"** is a form of the root "faciō" making "make or do". - -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. - -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". - -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. - -## Release Notes - -* **0.10.0** 2015.12.10 - * Full compatibility with Windows - * Introduction of afero utilities - * Test suite rewritten to work cross platform - * Normalize paths for MemMapFs - * Adding Sync to the file interface - * **Breaking Change** Walk and ReadDir have changed parameter order - * Moving types used by MemMapFs to a subpackage - * General bugfixes and improvements -* **0.9.0** 2015.11.05 - * New Walk function similar to filepath.Walk - * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC - * MemMapFs.Remove now really deletes the file - * InMemoryFile.Readdir and Readdirnames work correctly - * InMemoryFile functions lock it for concurrent access - * Test suite improvements -* **0.8.0** 2014.10.28 - * First public version - * Interfaces feel ready for people to build using - * Interfaces satisfy all known uses - * MemMapFs passes the majority of the OS test suite - * OsFs passes the majority of the OS test suite - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index f5b5e127..00000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright © 2014 Steve Francia . 
-// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - //Chmod changes the mode of the named file to mode. 
- Chmod(name string, mode os.FileMode) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index a633ad50..00000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - - go env - - go get -v github.com/spf13/afero/... - - go build github.com/spf13/afero -test_script: -- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 616ff8ff..00000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,180 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go 
deleted file mode 100644 index 29a26c67..00000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,290 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. -type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, 
cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? 
then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 5728243d..00000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 968fc278..00000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index e8108a85..00000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,293 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). -// -// Reading directories is currently only supported via Open(), not OpenFile(). 
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. 
-func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod deleted file mode 100644 index 08685509..00000000 --- a/vendor/github.com/spf13/afero/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/afero - -require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum deleted file mode 100644 index 6bad37b2..00000000 --- a/vendor/github.com/spf13/afero/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index c4219368..00000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index 5c3a3d8f..00000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. 
Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) -} - -func TempFile(fs Fs, dir, prefix string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0..00000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index c18a87fb..00000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. 
- -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.IndexAny(path, "*?[") >= 0 -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f..00000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5..00000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 7af2fb56..00000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" -) - -import "time" - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return 
f.Read(b) -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case 0: - atomic.StoreInt64(&f.at, offset) - case 1: - atomic.AddInt64(&f.at, int64(offset)) - case 2: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 09498e70..00000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - m.mu.Unlock() - - m.Chmod(name, perm|os.ModeDir) - - return nil -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - chmod := false - file, err := m.openWrite(name) - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - m.Chmod(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p, _ := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := 
m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index 13cc1b84..00000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). 
-type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f..00000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index c6376ec3..00000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,80 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 9d92dbc0..00000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,214 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, 
nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index abcf12d3..00000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,316 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
- _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories -// At the end of the directory view, the error is io.EOF. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) 
- } - - if f.off >= len(f.files) { - return nil, io.EOF - } - - if c == -1 { - return f.files[f.off:], nil - } - - if c > len(f.files) { - c = len(f.files) - } - - defer func() { f.off += c }() - return f.files[f.off:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? - if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f48..00000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. 
-const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. -func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. 
-func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. -func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. 
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE index 58ebdc16..d3214997 100644 --- a/vendor/github.com/ulikunitz/xz/LICENSE +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 Ulrich Kunitz +Copyright (c) 2014-2020 Ulrich Kunitz All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index c10e51b9..84bd5dcb 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -1,5 +1,9 @@ # TODO list +## Release v0.5.x + +1. Support check flag in gxz command. + ## Release v0.6 1. Review encoder and check for lzma improvements under xz. @@ -86,6 +90,20 @@ ## Log +## 2020-08-19 + +Release v0.5.8 fixes issue +[issue #35](https://github.com/ulikunitz/xz/issues/35). + +### 2020-02-24 + +Release v0.5.7 supports the check-ID None and fixes +[issue #27](https://github.com/ulikunitz/xz/issues/27). + +### 2019-02-20 + +Release v0.5.6 supports the go.mod file. + ### 2018-10-28 Release v0.5.5 fixes issues #19 observing ErrLimit outputs. diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go index fadc1a59..dc8f3286 100644 --- a/vendor/github.com/ulikunitz/xz/bits.go +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -54,6 +54,8 @@ var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") // readUvarint reads a uvarint from the given byte reader. func readUvarint(r io.ByteReader) (x uint64, n int, err error) { + const maxUvarintLen = 10 + var s uint i := 0 for { @@ -62,8 +64,11 @@ func readUvarint(r io.ByteReader) (x uint64, n int, err error) { return x, i, err } i++ + if i > maxUvarintLen { + return x, i, errOverflowU64 + } if b < 0x80 { - if i > 10 || i == 10 && b > 1 { + if i == maxUvarintLen && b > 1 { return x, i, errOverflowU64 } return x | uint64(b)<= FixExt1 && c <= FixExt16 } diff --git a/vendor/github.com/vmihailenco/msgpack/decode.go b/vendor/github.com/vmihailenco/msgpack/decode.go index 74141004..2edfa9c3 100644 --- a/vendor/github.com/vmihailenco/msgpack/decode.go +++ b/vendor/github.com/vmihailenco/msgpack/decode.go @@ -69,8 +69,9 @@ func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { // UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose // to decode msgpack value into Go interface{}. 
-func (d *Decoder) UseDecodeInterfaceLoose(flag bool) { +func (d *Decoder) UseDecodeInterfaceLoose(flag bool) *Decoder { d.useLoose = flag + return d } // UseJSONTag causes the Decoder to use json struct tag as fallback option diff --git a/vendor/github.com/vmihailenco/msgpack/decode_map.go b/vendor/github.com/vmihailenco/msgpack/decode_map.go index b542a754..2a3d3ecb 100644 --- a/vendor/github.com/vmihailenco/msgpack/decode_map.go +++ b/vendor/github.com/vmihailenco/msgpack/decode_map.go @@ -206,7 +206,7 @@ func (d *Decoder) DecodeMap() (interface{}, error) { return nil, err } - if codes.IsString(code) { + if codes.IsString(code) || codes.IsBin(code) { return d.decodeMapStringInterfaceSize(size) } @@ -222,6 +222,7 @@ func (d *Decoder) DecodeMap() (interface{}, error) { keyType := reflect.TypeOf(key) valueType := reflect.TypeOf(value) + mapType := reflect.MapOf(keyType, valueType) mapValue := reflect.MakeMap(mapType) diff --git a/vendor/github.com/vmihailenco/msgpack/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/decode_slice.go index 778d80b9..7d43ec61 100644 --- a/vendor/github.com/vmihailenco/msgpack/decode_slice.go +++ b/vendor/github.com/vmihailenco/msgpack/decode_slice.go @@ -107,8 +107,8 @@ func decodeSliceValue(d *Decoder, v reflect.Value) error { if i >= v.Len() { v.Set(growSliceValue(v, n)) } - sv := v.Index(i) - if err := d.DecodeValue(sv); err != nil { + elem := v.Index(i) + if err := d.DecodeValue(elem); err != nil { return err } } diff --git a/vendor/github.com/vmihailenco/msgpack/decode_value.go b/vendor/github.com/vmihailenco/msgpack/decode_value.go index 7b858b5f..d458de86 100644 --- a/vendor/github.com/vmihailenco/msgpack/decode_value.go +++ b/vendor/github.com/vmihailenco/msgpack/decode_value.go @@ -154,8 +154,10 @@ func unmarshalValueAddr(d *Decoder, v reflect.Value) error { } func unmarshalValue(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - return d.decodeNilValue(v) + if d.extLen == 0 || d.extLen == 1 { + if d.hasNilCode() { + return d.decodeNilValue(v) + } } if v.IsNil() { diff --git a/vendor/github.com/vmihailenco/msgpack/encode.go b/vendor/github.com/vmihailenco/msgpack/encode.go index c2bb23cd..08ca7dec 100644 --- a/vendor/github.com/vmihailenco/msgpack/encode.go +++ b/vendor/github.com/vmihailenco/msgpack/encode.go @@ -50,8 +50,15 @@ func Marshal(v interface{}) ([]byte, error) { } type Encoder struct { - w writer + w writer + buf []byte + // timeBuf is lazily allocated in encodeTime() to + // avoid allocations when time.Time value are encoded + // + // buf can't be reused for time encoding, as buf is used + // to encode msgpack extLen + timeBuf []byte sortMapKeys bool structAsArray bool diff --git a/vendor/github.com/vmihailenco/msgpack/ext.go b/vendor/github.com/vmihailenco/msgpack/ext.go index 7970be85..2f9dfea3 100644 --- a/vendor/github.com/vmihailenco/msgpack/ext.go +++ b/vendor/github.com/vmihailenco/msgpack/ext.go @@ -9,7 +9,12 @@ import ( "github.com/vmihailenco/msgpack/codes" ) -var extTypes = make(map[int8]reflect.Type) +type extInfo struct { + Type reflect.Type + Decoder decoderFunc +} + +var extTypes = make(map[int8]extInfo) var bufferPool = &sync.Pool{ New: func() interface{} { @@ -39,13 +44,14 @@ func RegisterExt(id int8, value interface{}) { } func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { - if dec != nil { - extTypes[id] = typ - } if enc != nil { typEncMap[typ] = makeExtEncoder(id, enc) } if dec != nil { + extTypes[id] = extInfo{ + Type: typ, + Decoder: dec, + } typDecMap[typ] = 
makeExtDecoder(id, dec) } } @@ -99,8 +105,8 @@ func makeExtDecoder(typeId int8, dec decoderFunc) decoderFunc { return err } - if id != typeId { - return fmt.Errorf("msgpack: got ext type=%d, wanted %d", int8(c), typeId) + if int8(id) != typeId { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", int8(id), typeId) } d.extLen = extLen @@ -184,15 +190,15 @@ func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { return nil, err } - typ, ok := extTypes[extId] + info, ok := extTypes[extId] if !ok { - return nil, fmt.Errorf("msgpack: unregistered ext id=%d", extId) + return nil, fmt.Errorf("msgpack: unknown ext id=%d", extId) } - v := reflect.New(typ) + v := reflect.New(info.Type) d.extLen = extLen - err = d.DecodeValue(v.Elem()) + err = info.Decoder(d, v.Elem()) d.extLen = 0 if err != nil { return nil, err diff --git a/vendor/github.com/vmihailenco/msgpack/time.go b/vendor/github.com/vmihailenco/msgpack/time.go index f7409690..3cf01e4c 100644 --- a/vendor/github.com/vmihailenco/msgpack/time.go +++ b/vendor/github.com/vmihailenco/msgpack/time.go @@ -28,21 +28,25 @@ func (e *Encoder) EncodeTime(tm time.Time) error { } func (e *Encoder) encodeTime(tm time.Time) []byte { + if e.timeBuf == nil { + e.timeBuf = make([]byte, 12) + } + secs := uint64(tm.Unix()) if secs>>34 == 0 { data := uint64(tm.Nanosecond())<<34 | secs if data&0xffffffff00000000 == 0 { - b := make([]byte, 4) + b := e.timeBuf[:4] binary.BigEndian.PutUint32(b, uint32(data)) return b } else { - b := make([]byte, 8) + b := e.timeBuf[:8] binary.BigEndian.PutUint64(b, data) return b } } - b := make([]byte, 12) + b := e.timeBuf[:12] binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) binary.BigEndian.PutUint64(b[4:], uint64(secs)) return b diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml deleted file mode 100644 index 13ff9986..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - 1.12 - diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md deleted file mode 100644 index b3bc3b61..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md +++ /dev/null @@ -1,10 +0,0 @@ -# 1.0.1 (July 30, 2019) - -* The YAML decoder is now correctly treating quoted scalars as verbatim literal - strings rather than using the fuzzy type selection rules for them. Fuzzy - type selection rules still apply to unquoted scalars. - ([#4](https://github.com/zclconf/go-cty-yaml/pull/4)) - -# 1.0.0 (May 26, 2019) - -Initial release. diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE deleted file mode 100644 index 4e6c00ab..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -This package is derived from gopkg.in/yaml.v2, which is copyright -2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Includes mechanical ports of code from libyaml, distributed under its original -license. See LICENSE.libyaml for more information. - -Modifications for cty interfacing copyright 2019 Martin Atkins, and -distributed under the same license terms. diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go deleted file mode 100644 index 1f7e87e6..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/apic.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. 
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. 
-func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. 
-func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. 
-// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. 
-// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go deleted file mode 100644 index a73b34a8..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/converter.go +++ /dev/null @@ -1,69 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" -) - -// ConverterConfig is used to configure a new converter, using NewConverter. -type ConverterConfig struct { - // EncodeAsFlow, when set to true, causes Marshal to produce flow-style - // mapping and sequence serializations. - EncodeAsFlow bool -} - -// A Converter can marshal and unmarshal between cty values and YAML bytes. -// -// Because there are many different ways to map cty to YAML and vice-versa, -// a converter is configurable using the settings in ConverterConfig, which -// allow for a few different permutations of mapping to YAML. -// -// If you are just trying to work with generic, standard YAML, the predefined -// converter in Standard should be good enough. -type Converter struct { - encodeAsFlow bool -} - -// NewConverter creates a new Converter with the given configuration. 
-func NewConverter(config *ConverterConfig) *Converter { - return &Converter{ - encodeAsFlow: config.EncodeAsFlow, - } -} - -// Standard is a predefined Converter that produces and consumes generic YAML -// using only built-in constructs that any other YAML implementation ought to -// understand. -var Standard *Converter = NewConverter(&ConverterConfig{}) - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { - return c.impliedType(src) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func (c *Converter) Marshal(v cty.Value) ([]byte, error) { - return c.marshal(v) -} - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// An error is returned if the given source contains any YAML document -// delimiters. -func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return c.unmarshal(src, ty) -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go deleted file mode 100644 index b91141cc..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go +++ /dev/null @@ -1,57 +0,0 @@ -package yaml - -import ( - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" -) - -// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code -// into a cty Value, using the ImpliedType and Unmarshal methods of the -// Standard pre-defined converter. -var YAMLDecodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "src", - Type: cty.String, - }, - }, - Type: func(args []cty.Value) (cty.Type, error) { - if !args[0].IsKnown() { - return cty.DynamicPseudoType, nil - } - if args[0].IsNull() { - return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") - } - return Standard.ImpliedType([]byte(args[0].AsString())) - }, - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if retType == cty.DynamicPseudoType { - return cty.DynamicVal, nil - } - return Standard.Unmarshal([]byte(args[0].AsString()), retType) - }, -}) - -// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value -// into YAML. 
-var YAMLEncodeFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "value", - Type: cty.DynamicPseudoType, - AllowNull: true, - AllowDynamicType: true, - }, - }, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - if !args[0].IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - raw, err := Standard.Marshal(args[0]) - if err != nil { - return cty.NilVal, err - } - return cty.StringVal(string(raw)), nil - }, -}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go deleted file mode 100644 index e369ff27..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/decode.go +++ /dev/null @@ -1,261 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilVal, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &valueAnalysis{ - anchorsPending: map[string]int{}, - anchorVals: map[string]cty.Value{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "missing start of document") - } - - v, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return convert.Convert(v, ty) -} - -func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilVal, parserError(p) - } - return c.unmarshalParseRemainder(an, &evt, p) -} - -func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.unmarshalScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.unmarshalAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.unmarshalMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.unmarshalSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) unmarshalScalar(an *valueAnalysis, 
evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilVal, parseEventErrorWrap(evt, err) - } - - if val.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - val = cty.StringVal("<<") - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, val) - } - return val, nil -} - -func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - vals := make(map[string]cty.Value) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - v := cty.ObjectVal(vals) - if anchor != "" { - an.completeAnchor(anchor, v) - } - return v, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilVal, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. - val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - ty := val.Type() - if !(ty.IsObjectType() || ty.IsMapType()) { - return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for it := val.ElementIterator(); it.Next(); { - k, v := it.Element() - vals[k.AsString()] = v - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") - } - val, err := c.unmarshalParse(an, p) - if err != nil { - return cty.NilVal, err - } - - vals[keyVal.AsString()] = val - } -} - -func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var vals []cty.Value - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilVal, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.TupleVal(vals) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - val, err := c.unmarshalParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilVal, err - } - - vals = append(vals, val) - } -} - -func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { - v, err := an.anchorVal(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - 
return v, err -} - -type valueAnalysis struct { - anchorsPending map[string]int - anchorVals map[string]cty.Value -} - -func (an *valueAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorVals[name] = v -} - -func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorVals[name] - if !ok { - return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go deleted file mode 100644 index a1c2cc52..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. 
-func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go deleted file mode 100644 index daa1478a..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/encode.go +++ /dev/null @@ -1,189 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" - "strings" - - "github.com/zclconf/go-cty/cty" -) - -func (c *Converter) marshal(v cty.Value) ([]byte, error) { - var buf bytes.Buffer - - e := &yaml_emitter_t{} - yaml_emitter_initialize(e) - yaml_emitter_set_output_writer(e, &buf) - yaml_emitter_set_unicode(e, true) - - var evt yaml_event_t - yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_document_start_event_initialize(&evt, nil, nil, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - if err := c.marshalEmit(v, e); err != nil { - return nil, err - } - - yaml_document_end_event_initialize(&evt, true) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - yaml_stream_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return nil, emitterError(e) - } - - return buf.Bytes(), nil -} - -func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { - ty := v.Type() - switch { - case v.IsNull(): - return c.marshalPrimitive(v, e) - case !v.IsKnown(): - return fmt.Errorf("cannot serialize unknown value as YAML") - case ty.IsPrimitiveType(): - return c.marshalPrimitive(v, e) - case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): - return c.marshalSequence(v, e) - case ty.IsObjectType(), ty.IsMapType(): - return c.marshalMapping(v, e) - default: - return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) - } -} - -func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { - var evt yaml_event_t - - if v.IsNull() { - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte("null"), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil - } - - switch v.Type() { - case cty.String: - str := v.AsString() - style := yaml_DOUBLE_QUOTED_SCALAR_STYLE - if strings.Contains(str, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - style, - ) - case cty.Number: - str := v.AsBigFloat().Text('f', -1) - yaml_scalar_event_initialize( 
- &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - case cty.Bool: - var str string - switch v { - case cty.True: - str = "true" - case cty.False: - str = "false" - } - yaml_scalar_event_initialize( - &evt, - nil, - nil, - []byte(str), - true, - true, - yaml_PLAIN_SCALAR_STYLE, - ) - } - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_SEQUENCE_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_SEQUENCE_STYLE - } - - var evt yaml_event_t - yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - _, v := it.Element() - err := c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_sequence_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} - -func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { - style := yaml_BLOCK_MAPPING_STYLE - if c.encodeAsFlow { - style = yaml_FLOW_MAPPING_STYLE - } - - var evt yaml_event_t - yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - - for it := v.ElementIterator(); it.Next(); { - k, v := it.Element() - err := c.marshalEmit(k, e) - if err != nil { - return err - } - err = c.marshalEmit(v, e) - if err != nil { - return err - } - } - - yaml_mapping_end_event_initialize(&evt) - if !yaml_emitter_emit(e, &evt) { - return emitterError(e) - } - return nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go deleted file mode 100644 index ae41c488..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/error.go +++ /dev/null @@ -1,97 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" -) - -// Error is an error implementation used to report errors that correspond to -// a particular position in an input buffer. -type Error struct { - cause error - Line, Column int -} - -func (e Error) Error() string { - return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) -} - -// Cause is an implementation of the interface used by -// github.com/pkg/errors.Cause, returning the underlying error without the -// position information. -func (e Error) Cause() error { - return e.cause -} - -// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper -// returning the underlying error without the position information. 
-func (e Error) WrappedErrors() []error { - return []error{e.cause} -} - -func parserError(p *yaml_parser_t) error { - var cause error - if len(p.problem) > 0 { - cause = errors.New(p.problem) - } else { - cause = errors.New("invalid YAML syntax") // useless generic error, then - } - - return parserErrorWrap(p, cause) -} - -func parserErrorWrap(p *yaml_parser_t, cause error) error { - switch { - case p.problem_mark.line != 0: - line := p.problem_mark.line - column := p.problem_mark.column - // Scanner errors don't iterate line before returning error - if p.error == yaml_SCANNER_ERROR { - line++ - column = 0 - } - return Error{ - cause: cause, - Line: line, - Column: column + 1, - } - case p.context_mark.line != 0: - return Error{ - cause: cause, - Line: p.context_mark.line, - Column: p.context_mark.column + 1, - } - default: - return cause - } -} - -func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { - return parserErrorWrap(p, fmt.Errorf(f, vals...)) -} - -func parseEventErrorWrap(evt *yaml_event_t, cause error) error { - if evt.start_mark.line == 0 { - // Event does not have a start mark, so we won't wrap the error at all - return cause - } - return Error{ - cause: cause, - Line: evt.start_mark.line, - Column: evt.start_mark.column + 1, - } -} - -func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { - return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) -} - -func emitterError(e *yaml_emitter_t) error { - var cause error - if len(e.problem) > 0 { - cause = errors.New(e.problem) - } else { - cause = errors.New("failed to write YAML token") // useless generic error, then - } - return cause -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.mod b/vendor/github.com/zclconf/go-cty-yaml/go.mod deleted file mode 100644 index 3d522687..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/zclconf/go-cty-yaml - -require github.com/zclconf/go-cty v1.0.0 diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.sum b/vendor/github.com/zclconf/go-cty-yaml/go.sum deleted file mode 100644 index 841f7fcf..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/go.sum +++ /dev/null @@ -1,18 +0,0 @@ -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go deleted file mode 100644 index 5b7b0686..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go +++ /dev/null @@ -1,268 +0,0 @@ -package yaml - -import ( - "errors" - "fmt" - - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -func (c *Converter) impliedType(src []byte) (cty.Type, error) { - p := &yaml_parser_t{} - if !yaml_parser_initialize(p) { - return cty.NilType, errors.New("failed to initialize YAML parser") - } - if len(src) == 0 { - src = []byte{'\n'} - } - - an := &typeAnalysis{ - anchorsPending: map[string]int{}, - anchorTypes: map[string]cty.Type{}, - } - - yaml_parser_set_input_string(p, src) - - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing stream start token") - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "missing start of document") - } - - ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ == yaml_DOCUMENT_START_EVENT { - return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") - } - if evt.typ != yaml_DOCUMENT_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) - } - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - if evt.typ != yaml_STREAM_END_EVENT { - return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value") - } - - return ty, err -} - -func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { - var evt yaml_event_t - if !yaml_parser_parse(p, &evt) { - return cty.NilType, parserError(p) - } - return c.impliedTypeParseRemainder(an, &evt, p) -} - -func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - switch evt.typ { - case yaml_SCALAR_EVENT: - return c.impliedTypeScalar(an, evt, p) - case yaml_ALIAS_EVENT: - return c.impliedTypeAlias(an, evt, p) - case yaml_MAPPING_START_EVENT: - return c.impliedTypeMapping(an, evt, p) - case yaml_SEQUENCE_START_EVENT: - return c.impliedTypeSequence(an, evt, p) - case yaml_DOCUMENT_START_EVENT: - return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") - case yaml_STREAM_END_EVENT: - // Decoding an empty buffer, probably - return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") - default: - // Should never happen; the above should be comprehensive - return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) - } -} - -func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - src := evt.value - tag := string(evt.tag) - anchor := string(evt.anchor) - implicit := 
evt.implicit - - if len(anchor) > 0 { - an.beginAnchor(anchor) - } - - var ty cty.Type - switch { - case tag == "" && !implicit: - // Untagged explicit string - ty = cty.String - default: - v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) - if err != nil { - return cty.NilType, parseEventErrorWrap(evt, err) - } - if v.RawEquals(mergeMappingVal) { - // In any context other than a mapping key, this is just a plain string - ty = cty.String - } else { - ty = v.Type() - } - } - - if len(anchor) > 0 { - an.completeAnchor(anchor, ty) - } - return ty, nil -} - -func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_MAP_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - atys := make(map[string]cty.Type) - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_MAPPING_END_EVENT { - ty := cty.Object(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - if nextEvt.typ != yaml_SCALAR_EVENT { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) - if err != nil { - return cty.NilType, err - } - if keyVal.RawEquals(mergeMappingVal) { - // Merging the value (which must be a mapping) into our mapping, - // then. - ty, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - if !ty.IsObjectType() { - return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) - } - for name, aty := range ty.AttributeTypes() { - atys[name] = aty - } - continue - } - if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { - keyVal = keyValStr - } else { - return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") - } - if keyVal.IsNull() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") - } - if !keyVal.IsKnown() { - return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") - } - valTy, err := c.impliedTypeParse(an, p) - if err != nil { - return cty.NilType, err - } - - atys[keyVal.AsString()] = valTy - } -} - -func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - tag := string(evt.tag) - anchor := string(evt.anchor) - - if tag != "" && tag != yaml_SEQ_TAG { - return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) - } - - if anchor != "" { - an.beginAnchor(anchor) - } - - var atys []cty.Type - for { - var nextEvt yaml_event_t - if !yaml_parser_parse(p, &nextEvt) { - return cty.NilType, parserError(p) - } - if nextEvt.typ == yaml_SEQUENCE_END_EVENT { - ty := cty.Tuple(atys) - if anchor != "" { - an.completeAnchor(anchor, ty) - } - return ty, nil - } - - valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) - if err != nil { - return cty.NilType, err - } - - atys = append(atys, valTy) - } -} - -func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { - ty, err := an.anchorType(string(evt.anchor)) - if err != nil { - err = parseEventErrorWrap(evt, err) - } - return ty, err -} - -type 
typeAnalysis struct { - anchorsPending map[string]int - anchorTypes map[string]cty.Type -} - -func (an *typeAnalysis) beginAnchor(name string) { - an.anchorsPending[name]++ -} - -func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { - an.anchorsPending[name]-- - if an.anchorsPending[name] == 0 { - delete(an.anchorsPending, name) - } - an.anchorTypes[name] = ty -} - -func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { - if _, pending := an.anchorsPending[name]; pending { - // YAML normally allows self-referencing structures, but cty cannot - // represent them (it requires all structures to be finite) so we - // must fail here. - return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) - } - ty, ok := an.anchorTypes[name] - if !ok { - return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) - } - return ty, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go deleted file mode 100644 index 81d05dfe..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. 
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case 
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go deleted file mode 100644 index 7c1f5fac..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go deleted file mode 100644 index 0f643834..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/resolve.go +++ /dev/null @@ -1,288 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/zclconf/go-cty/cty" -) - -type resolveMapItem struct { - value cty.Value - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' 
// Float (potentially in map) - - var resolveMapList = []struct { - v cty.Value - tag string - l []string - }{ - {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { - if !resolvableTag(tag) { - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if src != "" { - hint = resolveTable[src[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { - return cty.StringVal(src), nil - } - - // Handle things we can lookup in a map. - if item, ok := resolveMap[src]; ok { - return item.value, nil - } - - if tag == "" { - for _, nan := range []string{".nan", ".NaN", ".NAN"} { - if src == nan { - // cty cannot represent NaN, so this is an error - return cty.NilVal, fmt.Errorf("floating point NaN is not supported") - } - } - } - - // Base 60 floats are intentionally not supported. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - if numberVal, err := cty.ParseNumberVal(src); err == nil { - return numberVal, nil - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. 
- return cty.StringVal(t.Format(time.RFC3339)), nil - } - } - - plain := strings.Replace(src, "_", "", -1) - if numberVal, err := cty.ParseNumberVal(plain); err == nil { - return numberVal, nil - } - if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") { - tag = yaml_INT_TAG // will handle parsing below in our tag switch - } - default: - panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src)) - } - } - - if tag == "" && src == "<<" { - return mergeMappingVal, nil - } - - switch tag { - case yaml_STR_TAG, yaml_BINARY_TAG: - // If it's binary then we want to keep the base64 representation, because - // cty has no binary type, but we will check that it's actually base64. - if tag == yaml_BINARY_TAG { - _, err := base64.StdEncoding.DecodeString(src) - if err != nil { - return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag) - } - } - return cty.StringVal(src), nil - case yaml_BOOL_TAG: - item, ok := resolveMap[src] - if !ok || item.tag != yaml_BOOL_TAG { - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - } - return item.value, nil - case yaml_FLOAT_TAG, yaml_INT_TAG: - // Note: We don't actually check that a value tagged INT is a whole - // number here. We could, but cty generally doesn't care about the - // int/float distinction, so we'll just be generous and accept it. - plain := strings.Replace(src, "_", "", -1) - if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats - return numberVal, nil - } - if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes - return cty.NumberIntVal(intv), nil - } - if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes - return cty.NumberUIntVal(uintv), nil - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - return cty.NumberIntVal(intv), nil - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return cty.NumberUIntVal(uintv), nil - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - return cty.NumberIntVal(intv), nil - } - } - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - case yaml_TIMESTAMP_TAG: - t, ok := parseTimestamp(src) - if ok { - // cty has no timestamp type, but its functions stdlib - // conventionally uses strings in an RFC3339 encoding - // to represent time, so we'll follow that convention here. - return cty.StringVal(t.Format(time.RFC3339)), nil - } - return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) - case yaml_NULL_TAG: - return cty.NullVal(cty.DynamicPseudoType), nil - case "": - return cty.StringVal(src), nil - default: - return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) - } -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. - i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} - -type mergeMapping struct{} - -var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{})) -var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go deleted file mode 100644 index 077fd1dd..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go +++ /dev/null @@ -1,2696 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. 
-// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. 
-// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. 
- parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. 
- start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. 
- simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. 
- parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. 
- length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. 
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
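The block-scalar header parsing nearby accepts an optional chomping indicator (`+` or `-`) and an optional non-zero indentation indicator, in either order, after `|` or `>`. As an illustration of how those indicators change the resulting value, here is a hedged sketch (not part of the removed vendor file) that goes through the package's public Unmarshal; the expected strings follow the general YAML block-scalar rules rather than anything specific to this fork, and it assumes Unmarshal accepts a top-level scalar document:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	cases := []struct {
		name string
		src  string
	}{
		{"literal, clip (|)", "|\n  line 1\n  line 2\n"},
		{"literal, strip (|-)", "|-\n  line 1\n  line 2\n"},
		{"folded, clip (>)", ">\n  line 1\n  line 2\n"},
	}
	for _, c := range cases {
		v, err := yaml.Unmarshal([]byte(c.src), cty.String)
		if err != nil {
			panic(err)
		}
		// Expected per the YAML spec: literal style keeps line breaks, folded
		// style joins them with spaces; clip keeps one trailing newline,
		// strip removes it.
		fmt.Printf("%-22s %q\n", c.name, v.AsString())
	}
}
```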
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. 
- switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
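The escape switch above implements the standard YAML escape table for double-quoted scalars, with `\x`, `\u`, and `\U` decoded as hexadecimal code points and re-encoded as UTF-8. A small sketch of the observable behaviour through the public Unmarshal (again an illustrative aside, not part of the removed vendor file):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	// In the double-quoted scalar below, \t is a tab, \u00E9 is "é", and
	// \\ is a literal backslash, matching the escape table in the scanner.
	src := []byte(`"tab:\t e-acute:\u00E9 backslash:\\"`)

	v, err := yaml.Unmarshal(src, cty.String)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString())
}
```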
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. - if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go deleted file mode 100644 index a2dde608..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go deleted file mode 100644 index 2c314cc1..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yaml.go +++ /dev/null @@ -1,215 +0,0 @@ -// Package yaml can marshal and unmarshal cty values in YAML format. -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" - - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal reads the document found within the given source buffer -// and attempts to convert it into a value conforming to the given type -// constraint. -// -// This is an alias for Unmarshal on the predefined Converter in "Standard". -// -// An error is returned if the given source contains any YAML document -// delimiters. -func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { - return Standard.Unmarshal(src, ty) -} - -// Marshal serializes the given value into a YAML document, using a fixed -// mapping from cty types to YAML constructs. -// -// This is an alias for Marshal on the predefined Converter in "Standard". 
-// -// Note that unlike the function of the same name in the cty JSON package, -// this does not take a type constraint and therefore the YAML serialization -// cannot preserve late-bound type information in the serialization to be -// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type -// constraint given to Unmarshal will be decoded as if the corresponding portion -// of the input were processed with ImpliedType to find a target type. -func Marshal(v cty.Value) ([]byte, error) { - return Standard.Marshal(v) -} - -// ImpliedType analyzes the given source code and returns a suitable type that -// it could be decoded into. -// -// For a converter that is using standard YAML rather than cty-specific custom -// tags, only a subset of cty types can be produced: strings, numbers, bools, -// tuple types, and object types. -// -// This is an alias for ImpliedType on the predefined Converter in "Standard". -func ImpliedType(src []byte) (cty.Type, error) { - return Standard.ImpliedType(src) -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go deleted file mode 100644 index e25cee56..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go +++ /dev/null @@ -1,738 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. 
- yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
-) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. 
-type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? 
- flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go deleted file mode 100644 index 8110ce3c..00000000 --- a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. 
-func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go deleted file mode 100644 index 51c75aa8..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go +++ /dev/null @@ -1,211 +0,0 @@ -package msgpack - -import ( - "bytes" - "math/big" - "sort" - - "github.com/vmihailenco/msgpack" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" -) - -// Marshal produces a msgpack serialization of the given value that -// can be decoded into the given type later using Unmarshal. -// -// The given value must conform to the given type, or an error will -// be returned. 
-func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { - errs := val.Type().TestConformance(ty) - if errs != nil { - // Attempt a conversion - var err error - val, err = convert.Convert(val, ty) - if err != nil { - return nil, err - } - } - - // From this point onward, val can be assumed to be conforming to t. - - var path cty.Path - var buf bytes.Buffer - enc := msgpack.NewEncoder(&buf) - - err := marshal(val, ty, path, enc) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { - if val.IsMarked() { - return path.NewErrorf("value has marks, so it cannot be seralized") - } - - // If we're going to decode as DynamicPseudoType then we need to save - // dynamic type information to recover the real type. - if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { - return marshalDynamic(val, path, enc) - } - - if !val.IsKnown() { - err := enc.Encode(unknownVal) - if err != nil { - return path.NewError(err) - } - return nil - } - if val.IsNull() { - err := enc.EncodeNil() - if err != nil { - return path.NewError(err) - } - return nil - } - - // The caller should've guaranteed that the given val is conformant with - // the given type ty, so we'll proceed under that assumption here. - switch { - case ty.IsPrimitiveType(): - switch ty { - case cty.String: - err := enc.EncodeString(val.AsString()) - if err != nil { - return path.NewError(err) - } - return nil - case cty.Number: - var err error - switch { - case val.RawEquals(cty.PositiveInfinity): - err = enc.EncodeFloat64(positiveInfinity) - case val.RawEquals(cty.NegativeInfinity): - err = enc.EncodeFloat64(negativeInfinity) - default: - bf := val.AsBigFloat() - if iv, acc := bf.Int64(); acc == big.Exact { - err = enc.EncodeInt(iv) - } else if fv, acc := bf.Float64(); acc == big.Exact { - err = enc.EncodeFloat64(fv) - } else { - err = enc.EncodeString(bf.Text('f', -1)) - } - } - if err != nil { - return path.NewError(err) - } - return nil - case cty.Bool: - err := enc.EncodeBool(val.True()) - if err != nil { - return path.NewError(err) - } - return nil - default: - panic("unsupported primitive type") - } - case ty.IsListType(), ty.IsSetType(): - enc.EncodeArrayLen(val.LengthInt()) - ety := ty.ElementType() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - for it.Next() { - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - err := marshal(ev, ety, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsMapType(): - enc.EncodeMapLen(val.LengthInt()) - ety := ty.ElementType() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - for it.Next() { - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - var err error - err = marshal(ek, ek.Type(), path, enc) - if err != nil { - return err - } - err = marshal(ev, ety, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsTupleType(): - etys := ty.TupleElementTypes() - it := val.ElementIterator() - path := append(path, nil) // local override of 'path' with extra element - i := 0 - enc.EncodeArrayLen(len(etys)) - for it.Next() { - ety := etys[i] - ek, ev := it.Element() - path[len(path)-1] = cty.IndexStep{ - Key: ek, - } - err := marshal(ev, ety, path, enc) - if err != nil { - return err - } - i++ - } - return nil - case ty.IsObjectType(): - atys := ty.AttributeTypes() - path := 
append(path, nil) // local override of 'path' with extra element - - names := make([]string, 0, len(atys)) - for k := range atys { - names = append(names, k) - } - sort.Strings(names) - - enc.EncodeMapLen(len(names)) - - for _, k := range names { - aty := atys[k] - av := val.GetAttr(k) - path[len(path)-1] = cty.GetAttrStep{ - Name: k, - } - var err error - err = marshal(cty.StringVal(k), cty.String, path, enc) - if err != nil { - return err - } - err = marshal(av, aty, path, enc) - if err != nil { - return err - } - } - return nil - case ty.IsCapsuleType(): - return path.NewErrorf("capsule types not supported for msgpack encoding") - default: - // should never happen - return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) - } -} - -// marshalDynamic adds an extra wrapping object containing dynamic type -// information for the given value. -func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { - dv := dynamicVal{ - Value: val, - Path: path, - } - return enc.Encode(&dv) -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go deleted file mode 100644 index 6f6022e4..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go +++ /dev/null @@ -1,167 +0,0 @@ -package msgpack - -import ( - "bytes" - "fmt" - "io" - - "github.com/vmihailenco/msgpack" - msgpackcodes "github.com/vmihailenco/msgpack/codes" - "github.com/zclconf/go-cty/cty" -) - -// ImpliedType returns the cty Type implied by the structure of the given -// msgpack-compliant buffer. This function implements the default type mapping -// behavior used when decoding arbitrary msgpack without explicit cty Type -// information. -// -// The rules are as follows: -// -// msgpack strings, numbers and bools map to their equivalent primitive type in -// cty. -// -// msgpack maps become cty object types, with the attributes defined by the -// map keys and the types of their values. -// -// msgpack arrays become cty tuple types, with the elements defined by the -// types of the array members. -// -// Any nulls are typed as DynamicPseudoType, so callers of this function -// must be prepared to deal with this. Callers that do not wish to deal with -// dynamic typing should not use this function and should instead describe -// their required types explicitly with a cty.Type instance when decoding. -// -// Any unknown values are similarly typed as DynamicPseudoType, because these -// do not carry type information on the wire. -// -// Any parse errors will be returned as an error, and the type will be the -// invalid value cty.NilType. -func ImpliedType(buf []byte) (cty.Type, error) { - r := bytes.NewReader(buf) - dec := msgpack.NewDecoder(r) - - ty, err := impliedType(dec) - if err != nil { - return cty.NilType, err - } - - // We must now be at the end of the buffer - err = dec.Skip() - if err != io.EOF { - return ty, fmt.Errorf("extra bytes after msgpack value") - } - - return ty, nil -} - -func impliedType(dec *msgpack.Decoder) (cty.Type, error) { - // If this function returns with a nil error then it must have already - // consumed the next value from the decoder, since when called recursively - // the caller will be expecting to find a following value here. 
- - code, err := dec.PeekCode() - if err != nil { - return cty.NilType, err - } - - switch { - - case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): - err := dec.Skip() - return cty.DynamicPseudoType, err - - case code == msgpackcodes.True || code == msgpackcodes.False: - _, err := dec.DecodeBool() - return cty.Bool, err - - case msgpackcodes.IsFixedNum(code): - _, err := dec.DecodeInt64() - return cty.Number, err - - case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: - _, err := dec.DecodeInt64() - return cty.Number, err - - case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: - _, err := dec.DecodeUint64() - return cty.Number, err - - case code == msgpackcodes.Float || code == msgpackcodes.Double: - _, err := dec.DecodeFloat64() - return cty.Number, err - - case msgpackcodes.IsString(code): - _, err := dec.DecodeString() - return cty.String, err - - case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: - return impliedObjectType(dec) - - case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: - return impliedTupleType(dec) - - default: - return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) - } -} - -func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { - // If we get in here then we've already peeked the next code and know - // it's some sort of map. - l, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicPseudoType, nil - } - - var atys map[string]cty.Type - - for i := 0; i < l; i++ { - // Read the map key first. We require maps to be strings, but msgpack - // doesn't so we're prepared to error here if not. - k, err := dec.DecodeString() - if err != nil { - return cty.DynamicPseudoType, err - } - - aty, err := impliedType(dec) - if err != nil { - return cty.DynamicPseudoType, err - } - - if atys == nil { - atys = make(map[string]cty.Type) - } - atys[k] = aty - } - - if len(atys) == 0 { - return cty.EmptyObject, nil - } - - return cty.Object(atys), nil -} - -func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { - // If we get in here then we've already peeked the next code and know - // it's some sort of array. - l, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicPseudoType, nil - } - - if l == 0 { - return cty.EmptyTuple, nil - } - - etys := make([]cty.Type, l) - - for i := 0; i < l; i++ { - ety, err := impliedType(dec) - if err != nil { - return cty.DynamicPseudoType, err - } - etys[i] = ety - } - - return cty.Tuple(etys), nil -} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go deleted file mode 100644 index 51bb76a8..00000000 --- a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go +++ /dev/null @@ -1,334 +0,0 @@ -package msgpack - -import ( - "bytes" - - "github.com/vmihailenco/msgpack" - msgpackCodes "github.com/vmihailenco/msgpack/codes" - "github.com/zclconf/go-cty/cty" -) - -// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of -// the given type, returning the result. -// -// If an error is returned, the error is written with a hypothetical -// end-user that wrote the msgpack file as its audience, using cty type -// system concepts rather than Go type system concepts. 
-func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { - r := bytes.NewReader(b) - dec := msgpack.NewDecoder(r) - - var path cty.Path - return unmarshal(dec, ty, path) -} - -func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { - peek, err := dec.PeekCode() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - if msgpackCodes.IsExt(peek) { - // We just assume _all_ extensions are unknown values, - // since we don't have any other extensions. - dec.Skip() // skip what we've peeked - return cty.UnknownVal(ty), nil - } - if ty == cty.DynamicPseudoType { - return unmarshalDynamic(dec, path) - } - if peek == msgpackCodes.Nil { - dec.Skip() // skip what we've peeked - return cty.NullVal(ty), nil - } - - switch { - case ty.IsPrimitiveType(): - val, err := unmarshalPrimitive(dec, ty, path) - if err != nil { - return cty.NilVal, err - } - return val, nil - case ty.IsListType(): - return unmarshalList(dec, ty.ElementType(), path) - case ty.IsSetType(): - return unmarshalSet(dec, ty.ElementType(), path) - case ty.IsMapType(): - return unmarshalMap(dec, ty.ElementType(), path) - case ty.IsTupleType(): - return unmarshalTuple(dec, ty.TupleElementTypes(), path) - case ty.IsObjectType(): - return unmarshalObject(dec, ty.AttributeTypes(), path) - default: - return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) - } -} - -func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { - switch ty { - case cty.Bool: - rv, err := dec.DecodeBool() - if err != nil { - return cty.DynamicVal, path.NewErrorf("bool is required") - } - return cty.BoolVal(rv), nil - case cty.Number: - // Marshal will try int and float first, if the value can be - // losslessly represented in these encodings, and then fall - // back on a string if the number is too large or too precise. 
- peek, err := dec.PeekCode() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - - if msgpackCodes.IsFixedNum(peek) { - rv, err := dec.DecodeInt64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberIntVal(rv), nil - } - - switch peek { - case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: - rv, err := dec.DecodeInt64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberIntVal(rv), nil - case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: - rv, err := dec.DecodeUint64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberUIntVal(rv), nil - case msgpackCodes.Float, msgpackCodes.Double: - rv, err := dec.DecodeFloat64() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return cty.NumberFloatVal(rv), nil - default: - rv, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - v, err := cty.ParseNumberVal(rv) - if err != nil { - return cty.DynamicVal, path.NewErrorf("number is required") - } - return v, nil - } - case cty.String: - rv, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path.NewErrorf("string is required") - } - return cty.StringVal(rv), nil - default: - // should never happen - panic("unsupported primitive type") - } -} - -func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a list is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.List(ety)), nil - case length == 0: - return cty.ListValEmpty(ety), nil - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.ListVal(vals), nil -} - -func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a set is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Set(ety)), nil - case length == 0: - return cty.SetValEmpty(ety), nil - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.SetVal(vals), nil -} - -func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a map is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Map(ety)), nil - case length == 0: - return cty.MapValEmpty(ety), nil - } - - vals := make(map[string]cty.Value, length) - path = append(path, nil) - for i := 0; i < length; i++ { - key, err := dec.DecodeString() - if err != nil { - path[:len(path)-1].NewErrorf("non-string key in map") - } - - path[len(path)-1] = cty.IndexStep{ - Key: cty.StringVal(key), - } - - val, err := unmarshal(dec, 
ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals[key] = val - } - - return cty.MapVal(vals), nil -} - -func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("a tuple is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Tuple(etys)), nil - case length == 0: - return cty.TupleVal(nil), nil - case length != len(etys): - return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) - } - - vals := make([]cty.Value, 0, length) - path = append(path, nil) - for i := 0; i < length; i++ { - path[len(path)-1] = cty.IndexStep{ - Key: cty.NumberIntVal(int64(i)), - } - ety := etys[i] - - val, err := unmarshal(dec, ety, path) - if err != nil { - return cty.DynamicVal, err - } - - vals = append(vals, val) - } - - return cty.TupleVal(vals), nil -} - -func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeMapLen() - if err != nil { - return cty.DynamicVal, path.NewErrorf("an object is required") - } - - switch { - case length < 0: - return cty.NullVal(cty.Object(atys)), nil - case length == 0: - return cty.ObjectVal(nil), nil - case length != len(atys): - return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", - len(atys), length) - } - - vals := make(map[string]cty.Value, length) - path = append(path, nil) - for i := 0; i < length; i++ { - key, err := dec.DecodeString() - if err != nil { - return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") - } - - path[len(path)-1] = cty.IndexStep{ - Key: cty.StringVal(key), - } - aty, exists := atys[key] - if !exists { - return cty.DynamicVal, path.NewErrorf("unsupported attribute") - } - - val, err := unmarshal(dec, aty, path) - if err != nil { - return cty.DynamicVal, err - } - - vals[key] = val - } - - return cty.ObjectVal(vals), nil -} - -func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { - length, err := dec.DecodeArrayLen() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - - switch { - case length == -1: - return cty.NullVal(cty.DynamicPseudoType), nil - case length != 2: - return cty.DynamicVal, path.NewErrorf( - "dynamic value array must have exactly two elements", - ) - } - - typeJSON, err := dec.DecodeBytes() - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - var ty cty.Type - err = (&ty).UnmarshalJSON(typeJSON) - if err != nil { - return cty.DynamicVal, path.NewError(err) - } - - return unmarshal(dec, ty, path) -} diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock deleted file mode 100644 index 3be12ac8..00000000 --- a/vendor/go.opencensus.io/Gopkg.lock +++ /dev/null @@ -1,231 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" - name = "git.apache.org/thrift.git" - packages = ["lib/go/thrift"] - pruneopts = "UT" - revision = "6e67faa92827ece022380b211c2caaadd6145bf5" - source = "github.com/apache/thrift" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" - name = "github.com/openzipkin/zipkin-go" - packages = [ - ".", - "idgenerator", - "model", - "propagation", - "reporter", - "reporter/http", - ] - pruneopts = "UT" - revision = "d455a5674050831c1e187644faa4046d653433c2" - version = "v0.1.1" - -[[projects]] - digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - branch = "master" - digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" - -[[projects]] - digest = 
"1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "e21acd801f91da814261b938941d193bb036441a" - -[[projects]] - branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" - -[[projects]] - digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" - version = "v1.14.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "git.apache.org/thrift.git/lib/go/thrift", - "github.com/golang/protobuf/proto", - "github.com/openzipkin/zipkin-go", - "github.com/openzipkin/zipkin-go/model", - "github.com/openzipkin/zipkin-go/reporter", - "github.com/openzipkin/zipkin-go/reporter/http", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "golang.org/x/net/context", - "golang.org/x/net/http2", - "google.golang.org/api/support/bundler", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/grpclog", - "google.golang.org/grpc/metadata", - "google.golang.org/grpc/stats", - "google.golang.org/grpc/status", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml deleted file mode 100644 index a9f3cd68..00000000 --- a/vendor/go.opencensus.io/Gopkg.toml +++ /dev/null @@ -1,36 +0,0 @@ -# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" -# to avoid locking to a particular minor version which can cause dep to not be -# able to find a satisfying dependency graph. 
- -[[constraint]] - branch = "master" - name = "git.apache.org/thrift.git" - source = "github.com/apache/thrift" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.0.0" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go" - version = ">=0.1.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = ">=0.8.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "google.golang.org/api" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.11.3" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index 457866cb..b3ce3df3 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -8,7 +8,7 @@ ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) GOTEST_OPT?=-v -race -timeout 30s GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic GOTEST=go test -GOFMT=gofmt +GOIMPORTS=goimports GOLINT=golint GOVET=go vet EMBEDMD=embedmd @@ -17,14 +17,14 @@ TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packag TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') -.DEFAULT_GOAL := fmt-lint-vet-embedmd-test +.DEFAULT_GOAL := imports-lint-vet-embedmd-test -.PHONY: fmt-lint-vet-embedmd-test -fmt-lint-vet-embedmd-test: fmt lint vet embedmd test +.PHONY: imports-lint-vet-embedmd-test +imports-lint-vet-embedmd-test: imports lint vet embedmd test # TODO enable test-with-coverage in tavis .PHONY: travis-ci -travis-ci: fmt lint vet embedmd test test-386 +travis-ci: imports lint vet embedmd test test-386 all-pkgs: @echo $(ALL_PKGS) | tr ' ' '\n' | sort @@ -44,15 +44,15 @@ test-386: test-with-coverage: $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ +.PHONY: imports +imports: + @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ + if [ "$$IMPORTSOUT" ]; then \ + echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ + echo "$$IMPORTSOUT\n"; \ exit 1; \ else \ - echo "Fmt finished successfully"; \ + echo "Imports finished successfully"; \ fi .PHONY: lint @@ -91,6 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/tools/cmd/cover go get -u golang.org/x/lint/golint + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/tools/cmd/goimports go get -u github.com/rakyll/embedmd diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index fabab2e0..1d7e8371 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -9,6 +9,8 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. Currently it consists of three major components: tags, stats and tracing. +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). 
+ ## Installation ``` @@ -57,6 +59,7 @@ can implement their own exporters by implementing the exporter interfaces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats * [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces ## Overview @@ -78,7 +81,7 @@ Package `tag` allows adding or modifying tags in the current context. [embedmd]:# (internal/readme/tags.go new) ```go -ctx, err = tag.New(ctx, +ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) @@ -261,3 +264,4 @@ release in which the functionality was marked *Deprecated*. [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite [exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter +[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml index 12bd7c4c..d08f0eda 100644 --- a/vendor/go.opencensus.io/appveyor.yml +++ b/vendor/go.opencensus.io/appveyor.yml @@ -6,13 +6,12 @@ clone_folder: c:\gopath\src\go.opencensus.io environment: GOPATH: 'c:\gopath' - GOVERSION: '1.11' GO111MODULE: 'on' CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 +stack: go 1.11 + +before_test: - go version - go env diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod index cb4de80f..c867df5f 100644 --- a/vendor/go.opencensus.io/go.mod +++ b/vendor/go.opencensus.io/go.mod @@ -1,12 +1,15 @@ module go.opencensus.io require ( + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 github.com/golang/protobuf v1.3.1 github.com/google/go-cmp v0.3.0 - github.com/hashicorp/golang-lru v0.5.1 - golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 + github.com/stretchr/testify v1.4.0 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) + +go 1.13 diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum index 0b948c2b..01c02972 100644 --- a/vendor/go.opencensus.io/go.sum +++ b/vendor/go.opencensus.io/go.sum @@ -1,8 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -10,20 +14,24 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -45,6 +53,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd 
h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -58,4 +67,8 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index 9a638781..81dc7183 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -33,5 +33,5 @@ var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // end as a monotonic time. // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Now().Sub(start)) + return start.Add(time.Since(start)) } diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index 626d7364..e5e4b436 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.22.0" + return "0.23.0" } diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index 2f1c7f00..9ad88521 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -68,7 +68,7 @@ func ParseTraceID(tid string) (trace.TraceID, bool) { return trace.TraceID{}, false } b, err := hex.DecodeString(tid) - if err != nil { + if err != nil || len(b) > 16 { return trace.TraceID{}, false } var traceID trace.TraceID @@ -90,7 +90,7 @@ func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { return trace.SpanID{}, false } b, err := hex.DecodeString(sid) - if err != nil { + if err != nil || len(b) > 8 { return trace.SpanID{}, false } start := 8 - len(b) diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index 4f6404fa..c7ea6423 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -70,6 +70,12 @@ type Handler struct { // from the information found in the incoming HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string + + // IsHealthEndpoint holds the function to use for determining if the + // incoming HTTP request should be considered a health check. 
This is in + // addition to the private isHealthEndpoint func which may also indicate + // tracing should be skipped. + IsHealthEndpoint func(*http.Request) bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -87,7 +93,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if isHealthEndpoint(r.URL.Path) { + if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { return r, func() {} } var name string @@ -128,7 +134,7 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ // TODO: Handle cases where ContentLength is not set. } else if r.ContentLength > 0 { span.AddMessageReceiveEvent(0, /* TODO: messageID */ - int64(r.ContentLength), -1) + r.ContentLength, -1) } return r.WithContext(ctx), span.End } @@ -174,8 +180,6 @@ type trackingResponseWriter struct { // Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -var logTagsErrorOnce sync.Once - func (t *trackingResponseWriter) end(tags *addedTags) { t.endOnce.Do(func() { if t.statusCode == 0 { diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 63bbcda5..ee372904 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -92,38 +92,38 @@ var ( // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. - Host, _ = tag.NewKey("http.host") + Host = tag.MustNewKey("http.host") // StatusCode is the numeric HTTP response status code, // or "error" if a transport error occurred and no status code was read. - StatusCode, _ = tag.NewKey("http.status") + StatusCode = tag.MustNewKey("http.status") // Path is the URL path (not including query string) in the request. // // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. - Path, _ = tag.NewKey("http.path") + Path = tag.MustNewKey("http.path") // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method, _ = tag.NewKey("http.method") + Method = tag.MustNewKey("http.method") // KeyServerRoute is a low cardinality string representing the logical // handler of the request. This is usually the pattern registered on the a // ServeMux (or similar string). - KeyServerRoute, _ = tag.NewKey("http_server_route") + KeyServerRoute = tag.MustNewKey("http_server_route") ) // Client tag keys. var ( // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod, _ = tag.NewKey("http_client_method") + KeyClientMethod = tag.MustNewKey("http_client_method") // KeyClientPath is the URL path (not including query string). - KeyClientPath, _ = tag.NewKey("http_client_path") + KeyClientPath = tag.MustNewKey("http_client_path") // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. - KeyClientStatus, _ = tag.NewKey("http_client_status") + KeyClientStatus = tag.MustNewKey("http_client_status") // KeyClientHost is the value of the request Host header. 
- KeyClientHost, _ = tag.NewKey("http_client_host") + KeyClientHost = tag.MustNewKey("http_client_host") ) // Default distributions used by views in this package. diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index c23b97fb..ed3a5db5 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -186,6 +186,8 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeCancelled case http.StatusBadRequest: code = trace.StatusCodeInvalidArgument + case http.StatusUnprocessableEntity: + code = trace.StatusCodeInvalidArgument case http.StatusGatewayTimeout: code = trace.StatusCodeDeadlineExceeded case http.StatusNotFound: @@ -202,7 +204,10 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists } + return trace.Status{Code: code, Message: codeToStr[code]} } diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index ad469118..2b972834 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -31,10 +31,19 @@ func init() { } } +// Recorder provides an interface for exporting measurement information from +// the static Record method by using the WithRecorder option. +type Recorder interface { + // Record records a set of measurements associated with the given tags and attachments. + // The second argument is a `[]Measurement`. + Record(*tag.Map, interface{}, map[string]interface{}) +} + type recordOptions struct { attachments metricdata.Attachments mutators []tag.Mutator measurements []Measurement + recorder Recorder } // WithAttachments applies provided exemplar attachments. @@ -58,6 +67,14 @@ func WithMeasurements(measurements ...Measurement) Options { } } +// WithRecorder records the measurements to the specified `Recorder`, rather +// than to the global metrics recorder. +func WithRecorder(meter Recorder) Options { + return func(ro *recordOptions) { + ro.recorder = meter + } +} + // Options apply changes to recordOptions. type Options func(*recordOptions) @@ -93,6 +110,9 @@ func RecordWithOptions(ctx context.Context, ros ...Options) error { return nil } recorder := internal.DefaultRecorder + if o.recorder != nil { + recorder = o.recorder.Record + } if recorder == nil { return nil } diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go index 6931a5f2..73639965 100644 --- a/vendor/go.opencensus.io/stats/units.go +++ b/vendor/go.opencensus.io/stats/units.go @@ -22,4 +22,5 @@ const ( UnitDimensionless = "1" UnitBytes = "By" UnitMilliseconds = "ms" + UnitSeconds = "s" ) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index b7f169b4..9d709372 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -82,7 +82,7 @@ func Sum() *Aggregation { // Distribution indicates that the desired aggregation is // a histogram distribution. // -// An distribution aggregation may contain a histogram of the values in the +// A distribution aggregation may contain a histogram of the values in the // population. The bucket boundaries for that histogram are described // by the bounds. This defines len(bounds)+1 buckets. 
// @@ -99,13 +99,14 @@ func Sum() *Aggregation { // If len(bounds) is 1 then there is no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. func Distribution(bounds ...float64) *Aggregation { - return &Aggregation{ + agg := &Aggregation{ Type: AggTypeDistribution, Buckets: bounds, - newData: func() AggregationData { - return newDistributionData(bounds) - }, } + agg.newData = func() AggregationData { + return newDistributionData(agg) + } + return agg } // LastValue only reports the last value recorded using this diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index d500e67f..f331d456 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -128,12 +128,12 @@ type DistributionData struct { bounds []float64 // histogram distribution of the values } -func newDistributionData(bounds []float64) *DistributionData { - bucketCount := len(bounds) + 1 +func newDistributionData(agg *Aggregation) *DistributionData { + bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: bounds, + bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, } diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index dced225c..7bbedfe1 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -29,7 +29,7 @@ // LastValue just keeps track of the most recently recorded measurement value. // All aggregations are cumulative. // -// Views can be registerd and unregistered at any time during program execution. +// Views can be registered and unregistered at any time during program execution. // // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go index 7cb59718..73ba11f5 100644 --- a/vendor/go.opencensus.io/stats/view/export.go +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -14,13 +14,6 @@ package view -import "sync" - -var ( - exportersMu sync.RWMutex // guards exporters - exporters = make(map[Exporter]struct{}) -) - // Exporter exports the collected records as view data. // // The ExportView method should return quickly; if an @@ -43,16 +36,10 @@ type Exporter interface { // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - exporters[e] = struct{}{} + defaultWorker.RegisterExporter(e) } // UnregisterExporter unregisters an exporter. func UnregisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - delete(exporters, e) + defaultWorker.UnregisterExporter(e) } diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 37f88e1d..293b54ec 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -30,7 +30,7 @@ import ( ) // View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function to be before data will be +// Views need to be passed to the Register function before data will be // collected and sent to Exporters. 
type View struct { Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. @@ -43,7 +43,7 @@ type View struct { // Measure is a stats.Measure to aggregate in this view. Measure stats.Measure - // Aggregation is the aggregation function tp apply to the set of Measurements. + // Aggregation is the aggregation function to apply to the set of Measurements. Aggregation *Aggregation } @@ -189,7 +189,7 @@ func (r *Row) String() string { } // Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even both rows have the same tags but the tags appear in +// by the key name. Even if both rows have the same tags but the tags appear in // different orders it will return false. func (r *Row) Equal(other *Row) bool { if r == other { diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go index f67b5c46..5e1656a1 100644 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -18,6 +18,8 @@ package view import ( "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" ) @@ -85,12 +87,21 @@ func viewToMetricDescriptor(v *View) *metricdata.Descriptor { return &metricdata.Descriptor{ Name: v.Name, Description: v.Description, - Unit: getUnit(v.Measure.Unit()), + Unit: convertUnit(v), Type: getType(v), LabelKeys: getLabelKeys(v), } } +func convertUnit(v *View) metricdata.Unit { + switch v.Aggregation.Type { + case AggTypeCount: + return metricdata.UnitDimensionless + default: + return getUnit(v.Measure.Unit()) + } +} + func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { labelValues := []metricdata.LabelValue{} tagMap := make(map[string]string) @@ -116,7 +127,7 @@ func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Ti } } -func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTime time.Time) *metricdata.Metric { if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { startTime = time.Time{} @@ -135,6 +146,7 @@ func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricda m := &metricdata.Metric{ Descriptor: *v.metricDescriptor, TimeSeries: ts, + Resource: r, } return m } diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2f3c018a..ab8bfd46 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -20,6 +20,8 @@ import ( "sync" "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" @@ -28,7 +30,7 @@ import ( ) func init() { - defaultWorker = newWorker() + defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record } @@ -47,8 +49,69 @@ type worker struct { c chan command quit, done chan bool mu sync.RWMutex + r *resource.Resource + + exportersMu sync.RWMutex + exporters map[Exporter]struct{} +} + +// Meter defines an interface which allows a single process to maintain +// multiple sets of metrics exports (intended for the advanced case where a +// single process wants to report metrics about multiple objects, such as +// multiple databases or HTTP services). 
+// +// Note that this is an advanced use case, and the static functions in this +// module should cover the common use cases. +type Meter interface { + stats.Recorder + // Find returns a registered view associated with this name. + // If no registered view is found, nil is returned. + Find(name string) *View + // Register begins collecting data for the given views. + // Once a view is registered, it reports data to the registered exporters. + Register(views ...*View) error + // Unregister the given views. Data will not longer be exported for these views + // after Unregister returns. + // It is not necessary to unregister from views you expect to collect for the + // duration of your program execution. + Unregister(views ...*View) + // SetReportingPeriod sets the interval between reporting aggregated views in + // the program. If duration is less than or equal to zero, it enables the + // default behavior. + // + // Note: each exporter makes different promises about what the lowest supported + // duration is. For example, the Stackdriver exporter recommends a value no + // lower than 1 minute. Consult each exporter per your needs. + SetReportingPeriod(time.Duration) + + // RegisterExporter registers an exporter. + // Collected data will be reported via all the + // registered exporters. Once you no longer + // want data to be exported, invoke UnregisterExporter + // with the previously registered exporter. + // + // Binaries can register exporters, libraries shouldn't register exporters. + RegisterExporter(Exporter) + // UnregisterExporter unregisters an exporter. + UnregisterExporter(Exporter) + // SetResource may be used to set the Resource associated with this registry. + // This is intended to be used in cases where a single process exports metrics + // for multiple Resources, typically in a multi-tenant situation. + SetResource(*resource.Resource) + + // Start causes the Meter to start processing Record calls and aggregating + // statistics as well as exporting data. + Start() + // Stop causes the Meter to stop processing calls and terminate data export. + Stop() + + // RetrieveData gets a snapshot of the data collected for the the view registered + // with the given name. It is intended for testing only. + RetrieveData(viewName string) ([]*Row, error) } +var _ Meter = (*worker)(nil) + var defaultWorker *worker var defaultReportingDuration = 10 * time.Second @@ -56,11 +119,17 @@ var defaultReportingDuration = 10 * time.Second // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. func Find(name string) (v *View) { + return defaultWorker.Find(name) +} + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func (w *worker) Find(name string) (v *View) { req := &getViewByNameReq{ name: name, c: make(chan *getViewByNameResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.v } @@ -68,11 +137,17 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { + return defaultWorker.Register(views...) +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. 
+func (w *worker) Register(views ...*View) error { req := ®isterViewReq{ views: views, err: make(chan error), } - defaultWorker.c <- req + w.c <- req return <-req.err } @@ -81,6 +156,14 @@ func Register(views ...*View) error { // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. func Unregister(views ...*View) { + defaultWorker.Unregister(views...) +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func (w *worker) Unregister(views ...*View) { names := make([]string, len(views)) for i := range views { names[i] = views[i].Name @@ -89,31 +172,42 @@ func Unregister(views ...*View) { views: names, done: make(chan struct{}), } - defaultWorker.c <- req + w.c <- req <-req.done } // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. func RetrieveData(viewName string) ([]*Row, error) { + return defaultWorker.RetrieveData(viewName) +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func (w *worker) RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), v: viewName, c: make(chan *retrieveDataResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.rows, resp.err } func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + defaultWorker.Record(tags, ms, attachments) +} + +// Record records a set of measurements ms associated with the given tags and attachments. +func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ tm: tags, ms: ms.([]stats.Measurement), attachments: attachments, t: time.Now(), } - defaultWorker.c <- req + w.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in @@ -124,17 +218,31 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { + defaultWorker.SetReportingPeriod(d) +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func (w *worker) SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s req := &setReportingPeriodReq{ d: d, c: make(chan bool), } - defaultWorker.c <- req + w.c <- req <-req.c // don't return until the timer is set to the new duration. } -func newWorker() *worker { +// NewMeter constructs a Meter instance. You should only need to use this if +// you need to separate out Measurement recordings and View aggregations within +// a single process. 
+func NewMeter() Meter { return &worker{ measures: make(map[string]*measureRef), views: make(map[string]*viewInternal), @@ -143,9 +251,23 @@ func newWorker() *worker { c: make(chan command, 1024), quit: make(chan bool), done: make(chan bool), + + exporters: make(map[Exporter]struct{}), } } +// SetResource associates all data collected by this Meter with the specified +// resource. This resource is reported when using metricexport.ReadAndExport; +// it is not provided when used with ExportView/RegisterExporter, because that +// interface does not provide a means for reporting the Resource. +func (w *worker) SetResource(r *resource.Resource) { + w.r = r +} + +func (w *worker) Start() { + go w.start() +} + func (w *worker) start() { prodMgr := metricproducer.GlobalManager() prodMgr.AddProducer(w) @@ -155,7 +277,7 @@ func (w *worker) start() { case cmd := <-w.c: cmd.handleCommand(w) case <-w.timer.C: - w.reportUsage(time.Now()) + w.reportUsage() case <-w.quit: w.timer.Stop() close(w.c) @@ -165,7 +287,7 @@ func (w *worker) start() { } } -func (w *worker) stop() { +func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) @@ -202,44 +324,45 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return x, nil } w.views[vi.view.Name] = vi + w.startTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil } -func (w *worker) unregisterView(viewName string) { +func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() - delete(w.views, viewName) + delete(w.views, v.view.Name) + delete(w.startTimes, v) + if measure := w.measures[v.view.Measure.Name()]; measure != nil { + delete(measure.views, v) + } } -func (w *worker) reportView(v *viewInternal, now time.Time) { +func (w *worker) reportView(v *viewInternal) { if !v.isSubscribed() { return } rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } viewData := &Data{ View: v.view, Start: w.startTimes[v], End: time.Now(), Rows: rows, } - exportersMu.Lock() - for e := range exporters { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + for e := range w.exporters { e.ExportView(viewData) } - exportersMu.Unlock() } -func (w *worker) reportUsage(now time.Time) { +func (w *worker) reportUsage() { w.mu.Lock() defer w.mu.Unlock() for _, v := range w.views { - w.reportView(v, now) + w.reportView(v) } } @@ -248,11 +371,6 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { return nil } - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - var startTime time.Time if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { @@ -261,7 +379,7 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { startTime = w.startTimes[v] } - return viewToMetric(v, now, startTime) + return viewToMetric(v, w.r, now, startTime) } // Read reads all view data and returns them as metrics. 
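For context on the `Meter` and `stats.Recorder` APIs introduced in the worker.go and record.go hunks above: a minimal, hypothetical usage sketch (not part of the vendored patch itself), assuming opencensus-go v0.23.0 as vendored here; the measure name, view name, and bucket bounds are illustrative only.

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Illustrative measure; any name/unit would do.
var latencyMs = stats.Float64("request_latency_ms", "request latency", stats.UnitMilliseconds)

func main() {
	// A dedicated Meter keeps its own views, exporters, and Resource,
	// separate from the process-global defaults used by view.Register
	// and stats.Record.
	meter := view.NewMeter()
	meter.Start()
	defer meter.Stop()

	if err := meter.Register(&view.View{
		Name:        "request_latency_distribution",
		Measure:     latencyMs,
		// Distribution(bounds...) defines len(bounds)+1 buckets, per the
		// aggregation.go doc comment above.
		Aggregation: view.Distribution(5, 10, 25, 50, 100),
	}); err != nil {
		log.Fatal(err)
	}

	// Route this measurement to the dedicated Meter instead of the
	// global recorder, via the new stats.WithRecorder option.
	if err := stats.RecordWithOptions(context.Background(),
		stats.WithRecorder(meter),
		stats.WithMeasurements(latencyMs.M(42)),
	); err != nil {
		log.Fatal(err)
	}
}
```

The global `view.Register` / `stats.Record` path is unchanged by this refactor: `init()` now constructs the default worker via `NewMeter().(*worker)`, so existing callers continue reporting to the process-wide recorder.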
@@ -279,3 +397,17 @@ func (w *worker) Read() []*metricdata.Metric { } return metrics } + +func (w *worker) RegisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + w.exporters[e] = struct{}{} +} + +func (w *worker) UnregisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + delete(w.exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 0267e179..9ac4cc05 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -95,7 +95,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { } // Report pending data for this view before removing it. - w.reportView(vi, time.Now()) + w.reportView(vi) vi.unsubscribe() if !vi.isSubscribed() { @@ -103,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. vi.clearRows() } - w.unregisterView(name) + w.unregisterView(vi) } cmd.done <- struct{}{} } @@ -163,7 +163,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) } } } diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go index 4e63d08c..71ec9136 100644 --- a/vendor/go.opencensus.io/tag/key.go +++ b/vendor/go.opencensus.io/tag/key.go @@ -21,7 +21,7 @@ type Key struct { } // NewKey creates or retrieves a string key identified by name. -// Calling NewKey consequently with the same name returns the same key. +// Calling NewKey more than once with the same name returns the same key. func NewKey(name string) (Key, error) { if !checkKeyName(name) { return Key{}, errInvalidKeyName @@ -29,8 +29,7 @@ func NewKey(name string) (Key, error) { return Key{name: name}, nil } -// MustNewKey creates or retrieves a string key identified by name. -// An invalid key name raises a panic. +// MustNewKey returns a key with the given name, and panics if name is an invalid key name. func MustNewKey(name string) Key { k, err := NewKey(name) if err != nil { diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go index f8b58276..c242e695 100644 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -168,7 +168,7 @@ func Encode(m *Map) []byte { eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } - eg.writeByte(byte(tagsVersionID)) + eg.writeByte(tagsVersionID) for k, v := range m.m { if v.m.ttl.ttl == valueTTLUnlimitedPropagation { eg.writeByte(byte(keyTypeString)) diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 3f80a336..908c2497 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -15,23 +15,47 @@ package trace import ( - "github.com/hashicorp/golang-lru/simplelru" + "github.com/golang/groupcache/lru" ) +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. 
type lruMap struct { - simpleLruMap *simplelru.LRU + cacheKeys map[lru.Key]bool + cache *lru.Cache droppedCount int } func newLruMap(size int) *lruMap { - lm := &lruMap{} - lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } return lm } -func (lm *lruMap) add(key, value interface{}) { - evicted := lm.simpleLruMap.Add(key, value) - if evicted { - lm.droppedCount++ +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := make([]interface{}, len(lm.cacheKeys)) + for k := range lm.cacheKeys { + keys = append(keys, k) } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) } diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 38ead7bf..125e2cd9 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -296,7 +296,7 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.lruAttributes.simpleLruMap.Len() > 0 { + if s.lruAttributes.len() > 0 { sd.Attributes = s.lruAttributesToAttributeMap() sd.DroppedAttributeCount = s.lruAttributes.droppedCount } @@ -345,7 +345,7 @@ func (s *Span) SetStatus(status Status) { } func (s *Span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0) + linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) } @@ -353,7 +353,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link { } func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0) + messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) } @@ -361,7 +361,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { } func (s *Span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0) + annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) } @@ -369,9 +369,9 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { } func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}) - for _, key := range s.lruAttributes.simpleLruMap.Keys() { - value, ok := s.lruAttributes.simpleLruMap.Get(key) + attributes := make(map[string]interface{}, s.lruAttributes.len()) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) if ok { keyStr := key.(string) attributes[keyStr] = value @@ -420,7 +420,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in var m map[string]interface{} s.mu.Lock() if len(attributes) != 0 { - m = make(map[string]interface{}) + m = make(map[string]interface{}, len(attributes)) copyAttributes(m, attributes) } s.annotations.add(Annotation{ @@ -436,7 +436,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { var a map[string]interface{} s.mu.Lock() if len(attributes) != 0 { - a = 
make(map[string]interface{}) + a = make(map[string]interface{}, len(attributes)) copyAttributes(a, attributes) } s.annotations.add(Annotation{ diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index aeb73f81..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. 
-type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. -var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. 
-func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). - p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. - ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. 
-func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f195..00000000 --- a/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. -func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. 
-func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204..00000000 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. -// The key argument should be the Blowfish key, from 1 to 56 bytes. -func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates a returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the key k -// and stores the result in dst. 
-// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the key k -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index d0407759..00000000 --- a/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// https://www.schneier.com/code/constants.txt. 
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/golang.org/x/lint/.travis.yml b/vendor/golang.org/x/lint/.travis.yml new file mode 100644 index 00000000..50553ebd --- /dev/null +++ b/vendor/golang.org/x/lint/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.10.x + - 1.11.x + - master + +go_import_path: golang.org/x/lint + +install: + - go get -t -v ./... + +script: + - go test -v -race ./... + +matrix: + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/golang.org/x/lint/CONTRIBUTING.md b/vendor/golang.org/x/lint/CONTRIBUTING.md new file mode 100644 index 00000000..1fadda62 --- /dev/null +++ b/vendor/golang.org/x/lint/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to Golint + +## Before filing an issue: + +### Are you having trouble building golint? + +Check you have the latest version of its dependencies. Run +``` +go get -u golang.org/x/lint/golint +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of golint? Make sure to carefully read `README`. 
diff --git a/vendor/golang.org/x/lint/LICENSE b/vendor/golang.org/x/lint/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/lint/README.md b/vendor/golang.org/x/lint/README.md new file mode 100644 index 00000000..4968b13a --- /dev/null +++ b/vendor/golang.org/x/lint/README.md @@ -0,0 +1,88 @@ +Golint is a linter for Go source code. + +[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) + +## Installation + +Golint requires a +[supported release of Go](https://golang.org/doc/devel/release.html#policy). + + go get -u golang.org/x/lint/golint + +To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. + +## Usage + +Invoke `golint` with one or more filenames, directories, or packages named +by its import path. Golint uses the same +[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as +the `go` command and therefore +also supports relative import paths like `./...`. Additionally the `...` +wildcard can be used as suffix on relative and absolute file paths to recurse +into them. + +The output of this tool is a list of suggestions in Vim quickfix format, +which is accepted by lots of different editors. + +## Purpose + +Golint differs from gofmt. Gofmt reformats Go source code, whereas +golint prints out style mistakes. + +Golint differs from govet. Govet is concerned with correctness, whereas +golint is concerned with coding style. Golint is in use at Google, and it +seeks to match the accepted style of the open source Go project. + +The suggestions made by golint are exactly that: suggestions. +Golint is not perfect, and has both false positives and false negatives. +Do not treat its output as a gold standard. 
We will not be adding pragmas +or other knobs to suppress specific warnings, so do not expect or require +code to be completely "lint-free". +In short, this tool is not, and will never be, trustworthy enough for its +suggestions to be enforced automatically, for example as part of a build process. +Golint makes suggestions for many of the mechanically checkable items listed in +[Effective Go](https://golang.org/doc/effective_go.html) and the +[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). + +## Scope + +Golint is meant to carry out the stylistic conventions put forth in +[Effective Go](https://golang.org/doc/effective_go.html) and +[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). +Changes that are not aligned with those documents will not be considered. + +## Contributions + +Contributions to this project are welcome provided they are [in scope](#scope), +though please send mail before starting work on anything major. +Contributors retain their copyright, so we need you to fill out +[a short form](https://developers.google.com/open-source/cla/individual) +before we can accept your contribution. + +## Vim + +Add this to your ~/.vimrc: + + set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running `:Lint` will run golint on the current file and populate the quickfix list. + +Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` + + autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow + + +## Emacs + +Add this to your `.emacs` file: + + (add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/")) + (require 'golint) + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running M-x golint will run golint on the current file. + +For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). 
diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod new file mode 100644 index 00000000..b32309c4 --- /dev/null +++ b/vendor/golang.org/x/lint/go.mod @@ -0,0 +1,5 @@ +module golang.org/x/lint + +go 1.11 + +require golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum new file mode 100644 index 00000000..2ad45cae --- /dev/null +++ b/vendor/golang.org/x/lint/go.sum @@ -0,0 +1,12 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/lint/golint/golint.go b/vendor/golang.org/x/lint/golint/golint.go new file mode 100644 index 00000000..ac024b6d --- /dev/null +++ b/vendor/golang.org/x/lint/golint/golint.go @@ -0,0 +1,159 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// golint lints the Go source files named on its command line. +package main + +import ( + "flag" + "fmt" + "go/build" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/lint" +) + +var ( + minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it") + setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found") + suggestions int +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + flag.Usage = usage + flag.Parse() + + if flag.NArg() == 0 { + lintDir(".") + } else { + // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to + // directory, file or package targets. The distinction affects which + // checks are run. It is no valid to mix target types. 
+ var dirsRun, filesRun, pkgsRun int + var args []string + for _, arg := range flag.Args() { + if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { + dirsRun = 1 + for _, dirname := range allPackagesInFS(arg) { + args = append(args, dirname) + } + } else if isDir(arg) { + dirsRun = 1 + args = append(args, arg) + } else if exists(arg) { + filesRun = 1 + args = append(args, arg) + } else { + pkgsRun = 1 + args = append(args, arg) + } + } + + if dirsRun+filesRun+pkgsRun != 1 { + usage() + os.Exit(2) + } + switch { + case dirsRun == 1: + for _, dir := range args { + lintDir(dir) + } + case filesRun == 1: + lintFiles(args...) + case pkgsRun == 1: + for _, pkg := range importPaths(args) { + lintPackage(pkg) + } + } + } + + if *setExitStatus && suggestions > 0 { + fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions) + os.Exit(1) + } +} + +func isDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +func exists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func lintFiles(filenames ...string) { + files := make(map[string][]byte) + for _, filename := range filenames { + src, err := ioutil.ReadFile(filename) + if err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + files[filename] = src + } + + l := new(lint.Linter) + ps, err := l.LintFiles(files) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + for _, p := range ps { + if p.Confidence >= *minConfidence { + fmt.Printf("%v: %s\n", p.Position, p.Text) + suggestions++ + } + } +} + +func lintDir(dirname string) { + pkg, err := build.ImportDir(dirname, 0) + lintImportedPackage(pkg, err) +} + +func lintPackage(pkgname string) { + pkg, err := build.Import(pkgname, ".", 0) + lintImportedPackage(pkg, err) +} + +func lintImportedPackage(pkg *build.Package, err error) { + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + // Don't complain if the failure is due to no Go source files. + return + } + fmt.Fprintln(os.Stderr, err) + return + } + + var files []string + files = append(files, pkg.GoFiles...) + files = append(files, pkg.CgoFiles...) + files = append(files, pkg.TestGoFiles...) + if pkg.Dir != "." { + for i, f := range files { + files[i] = filepath.Join(pkg.Dir, f) + } + } + // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles) + + lintFiles(files...) +} diff --git a/vendor/golang.org/x/lint/golint/import.go b/vendor/golang.org/x/lint/golint/import.go new file mode 100644 index 00000000..2ba9dea7 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/import.go @@ -0,0 +1,309 @@ +package main + +/* + +This file holds a direct copy of the import path matching code of +https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be +replaced when https://golang.org/issue/8768 is resolved. + +It has been updated to follow upstream changes in a few ways. + +*/ + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var ( + buildContext = build.Default + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") +) + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. 
+func importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if a == "all" || a == "std" { + out = append(out, allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func importPaths(args []string) []string { + args = importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, allPackagesInFS(a)...) + } else { + out = append(out, allPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return func(name string) bool { + return reg.MatchString(name) + } +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages) +// or a path including "...". 
+func allPackages(pattern string) []string { + pkgs := matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if pattern != "all" && pattern != "std" { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !buildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + // Commands + cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) + filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == cmd { + return nil + } + name := path[len(cmd):] + if !treeCanMatch(name) { + return filepath.SkipDir + } + // Commands are all in cmd/, not in subdirectories. + if strings.Contains(name, string(filepath.Separator)) { + return filepath.SkipDir + } + + // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. + name = "cmd/" + name + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + + for _, src := range buildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. +func allPackagesInFS(pattern string) []string { + pkgs := matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. 
+ // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + if _, err = build.ImportDir(path, 0); err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} diff --git a/vendor/golang.org/x/lint/golint/importcomment.go b/vendor/golang.org/x/lint/golint/importcomment.go new file mode 100644 index 00000000..d5b32f73 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/importcomment.go @@ -0,0 +1,13 @@ +// Copyright (c) 2018 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// +build go1.12 + +// Require use of the correct import path only for Go 1.12+ users, so +// any breakages coincide with people updating their CI configs or +// whatnot. + +package main // import "golang.org/x/lint/golint" diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go new file mode 100644 index 00000000..7d813e06 --- /dev/null +++ b/vendor/golang.org/x/lint/lint.go @@ -0,0 +1,1615 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lint contains a linter for Go source code. +package lint // import "golang.org/x/lint" + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/gcexportdata" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// A Linter lints Go source code. +type Linter struct { +} + +// Problem represents a problem in some source code. 
+type Problem struct { + Position token.Position // position in source file + Text string // the prose that describes the problem + Link string // (optional) the link to the style guide for the problem + Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness + LineText string // the source line + Category string // a short name for the general category of the problem + + // If the problem has a suggested fix (the minority case), + // ReplacementLine is a full replacement for the relevant line of the source file. + ReplacementLine string +} + +func (p *Problem) String() string { + if p.Link != "" { + return p.Text + "\n\n" + p.Link + } + return p.Text +} + +type byPosition []Problem + +func (p byPosition) Len() int { return len(p) } +func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p byPosition) Less(i, j int) bool { + pi, pj := p[i].Position, p[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return p[i].Text < p[j].Text +} + +// Lint lints src. +func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { + return l.LintFiles(map[string][]byte{filename: src}) +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. +func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { + pkg := &pkg{ + fset: token.NewFileSet(), + files: make(map[string]*file), + } + var pkgName string + for filename, src := range files { + if isGenerated(src) { + continue // See issue #239 + } + f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) + if err != nil { + return nil, err + } + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// isGenerated reports whether the source file is generated code +// according the rules from https://golang.org/s/generatedcode. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} + +// pkg represents a package being linted. +type pkg struct { + fset *token.FileSet + files map[string]*file + + typesPkg *types.Package + typesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + sortable map[string]bool + // main is whether this is a "main" package. + main bool + + problems []Problem +} + +func (p *pkg) lint() []Problem { + if err := p.typeCheck(); err != nil { + /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. + if e, ok := err.(types.Error); ok { + pos := p.fset.Position(e.Pos) + conf := 1.0 + if strings.Contains(e.Msg, "can't find import: ") { + // Golint is probably being run in a context that doesn't support + // typechecking (e.g. package files aren't found), so don't warn about it. 
+ conf = 0 + } + if conf > 0 { + p.errorfAt(pos, conf, category("typechecking"), e.Msg) + } + + // TODO(dsymonds): Abort if !e.Soft? + } + */ + } + + p.scanSortable() + p.main = p.isMain() + + for _, f := range p.files { + f.lint() + } + + sort.Sort(byPosition(p.problems)) + + return p.problems +} + +// file represents a file being linted. +type file struct { + pkg *pkg + f *ast.File + fset *token.FileSet + src []byte + filename string +} + +func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } + +func (f *file) lint() { + f.lintPackageComment() + f.lintImports() + f.lintBlankImports() + f.lintExported() + f.lintNames() + f.lintElses() + f.lintRanges() + f.lintErrorf() + f.lintErrors() + f.lintErrorStrings() + f.lintReceiverNames() + f.lintIncDec() + f.lintErrorReturn() + f.lintUnexportedReturn() + f.lintTimeNames() + f.lintContextKeyTypes() + f.lintContextArgs() +} + +type link string +type category string + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +// It returns the new Problem. +func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { + pos := f.fset.Position(n.Pos()) + if pos.Filename == "" { + pos.Filename = f.filename + } + return f.pkg.errorfAt(pos, confidence, args...) +} + +func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { + problem := Problem{ + Position: pos, + Confidence: confidence, + } + if pos.Filename != "" { + // The file might not exist in our mapping if a //line directive was encountered. + if f, ok := p.files[pos.Filename]; ok { + problem.LineText = srcLine(f.src, pos) + } + } + +argLoop: + for len(args) > 1 { // always leave at least the format string in args + switch v := args[0].(type) { + case link: + problem.Link = string(v) + case category: + problem.Category = string(v) + default: + break argLoop + } + args = args[1:] + } + + problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) + + p.problems = append(p.problems, problem) + return &p.problems[len(p.problems)-1] +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +func (p *pkg) typeCheck() error { + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *file + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.f) + } + pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. + p.typesPkg = pkg + p.typesInfo = info + return err +} + +func (p *pkg) typeOf(expr ast.Expr) types.Type { + if p.typesInfo == nil { + return nil + } + return p.typesInfo.TypeOf(expr) +} + +func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} + +// scopeOf returns the tightest scope encompassing id. 
+func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { + var scope *types.Scope + if obj := p.typesInfo.ObjectOf(id); obj != nil { + scope = obj.Parent() + } + if scope == p.typesPkg.Scope() { + // We were given a top-level identifier. + // Use the file-level scope instead of the package-level scope. + pos := id.Pos() + for _, f := range p.files { + if f.f.Pos() <= pos && pos < f.f.End() { + scope = p.typesInfo.Scopes[f.f] + break + } + } + } + return scope +} + +func (p *pkg) scanSortable() { + p.sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := nmap[fn.Name.Name]; ok { + has[recv] |= i + } + return false + }) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.sortable[typ] = true + } + } +} + +func (p *pkg) isMain() bool { + for _, f := range p.files { + if f.isMain() { + return true + } + } + return false +} + +func (f *file) isMain() bool { + if f.f.Name.Name == "main" { + return true + } + return false +} + +// lintPackageComment checks package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +func (f *file) lintPackageComment() { + if f.isTest() { + return + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + f.f.Name.Name + " " + + // Look for a detached package comment. + // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range f.f.Comments { + if cg.Pos() > f.f.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := f.fset.Position(lastCG.End()) + pkgPos := f.fset.Position(f.f.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. + Line: endPos.Line + 1, + Column: 1, + } + f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") + return + } + } + + if f.f.Doc == nil { + f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") + return + } + s := f.f.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") + s = ts + } + // Only non-main packages need to keep to this form. 
+ if !f.pkg.main && !strings.HasPrefix(s, prefix) { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) + } +} + +// lintBlankImports complains if a non-main package has blank imports that are +// not documented. +func (f *file) lintBlankImports() { + // In package main and in tests, we don't complain about blank imports. + if f.pkg.main || f.isTest() { + return + } + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. + for i, imp := range f.f.Imports { + pos := f.fset.Position(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + if i > 0 { + prev := f.f.Imports[i-1] + prevPos := f.fset.Position(prev.Pos()) + if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { + continue // A subsequent blank in a group. + } + } + + // This is the first blank import of a group. + if imp.Doc == nil && imp.Comment == nil { + ref := "" + f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") + } + } +} + +// lintImports examines import blocks. +func (f *file) lintImports() { + for i, is := range f.f.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." && !f.isTest() { + f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") + } + + } +} + +const docCommentsLink = styleGuideBase + "#doc-comments" + +// lintExported examines the exported names. +// It complains if any required doc comments are missing, +// or if they are not of the right form. The exact rules are in +// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function +// also tracks the GenDecl structure being traversed to permit +// doc comments for constants to be on top of the const block. +// It also complains if the names stutter when combined with +// the package name. +func (f *file) lintExported() { + if f.isTest() { + return + } + + var lastGen *ast.GenDecl // last GenDecl entered. + + // Set of GenDecls that have already had missing comments flagged. + genDeclMissingComments := make(map[*ast.GenDecl]bool) + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return false + } + // token.CONST, token.TYPE or token.VAR + lastGen = v + return true + case *ast.FuncDecl: + f.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. + // Method names are not used package-qualified. + f.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return false + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = lastGen.Doc + } + f.lintTypeDoc(v, doc) + f.checkStutter(v.Name, "type") + // Don't proceed inside types. + return false + case *ast.ValueSpec: + f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) + return false + } + return true + }) +} + +var ( + allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + anyCapsRE = regexp.MustCompile(`[A-Z]`) +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. 
+var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func isInTopLevel(f *ast.File, ident *ast.Ident) bool { + path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) + for _, f := range path { + switch f.(type) { + case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: + continue + } + return false + } + return true +} + +// lintNames examines all names in the file. +// It complains if any use underscores or incorrect known initialisms. +func (f *file) lintNames() { + // Package names need slightly different handling than other names. + if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") + } + if anyCapsRE.MatchString(f.f.Name.Name) { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) + } + + check := func(id *ast.Ident, thing string) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. + if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + capCount := 0 + for _, c := range id.Name { + if 'A' <= c && c <= 'Z' { + capCount++ + } + } + if capCount >= 2 { + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") + return + } + } + if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) + } + } + + should := lintName(id.Name) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing) + } + } + } + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var") + } + } + case *ast.FuncDecl: + if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. 
+ if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing) + } + + checkList(v.Type.Params, thing+" parameter") + checkList(v.Type.Results, thing+" result") + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter") + checkList(ft.Results, "interface method result") + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var") + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var") + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field") + } + } + } + return true + }) +} + +// lintName returns a different name if it should be different. +func lintName(name string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); commonInitialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} + +// lintTypeDoc examines the doc comment on a type. +// It complains if they are missing from an exported type, +// or if they are not of the standard form. +func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) + return + } + + s := doc.Text() + articles := [...]string{"A", "An", "The"} + for _, a := range articles { + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) + } +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, + "Unwrap": true, +} + +// lintFuncDoc examines doc comments on functions and methods. +// It complains if they are missing, or not of the right form. +// It has specific exclusions for well-known methods (see commonMethods above). +func (f *file) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if f.pkg.sortable[recv] { + return + } + } + name = recv + "." + name + } + if fn.Doc == nil { + f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) + return + } + s := fn.Doc.Text() + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +// lintValueSpecDoc examines package-global variables and constants. +// It complains if they are not individually declared, +// or if they are not suitably documented in the right form (unless they are in a block that is commented). +func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) + return + } + } + } + + // Only one name. 
+ name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. + if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + if !strings.HasPrefix(doc.Text(), prefix) { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +func (f *file) checkStutter(id *ast.Ident, thing string) { + pkg, name := f.f.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // the it's starting a new word and thus this name stutters. + rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) + } +} + +// zeroLiteral is a set of ast.BasicLit values that are zero values. +// It is not exhaustive. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// lintElses examines else blocks. It complains about any else block whose if block ends in a return. +func (f *file) lintElses() { + // We don't want to flag if { } else if { } else { } constructions. + // They will appear as an IfStmt whose Else field is also an IfStmt. + // Record such a node so we ignore it when we visit it. + ignore := make(map[*ast.IfStmt]bool) + + f.walk(func(node ast.Node) bool { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return true + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + ignore[elseif] = true + return true + } + if ignore[ifStmt] { + return true + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return true + } + if len(ifStmt.Body.List) == 0 { + return true + } + shortDecl := false // does the if statement have a ":=" initialization statement? 
+ if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) + } + return true + }) +} + +// lintRanges examines range clauses. It complains about redundant constructions. +func (f *file) lintRanges() { + f.walk(func(node ast.Node) bool { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return true + } + + if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { + p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") + + newRS := *rs // shallow copy + newRS.Value = nil + newRS.Key = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + + return true + } + + if isIdent(rs.Value, "_") { + p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) + + newRS := *rs // shallow copy + newRS.Value = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + } + + return true + }) +} + +// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. +func (f *file) lintErrorf() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return true + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := f.pkg.typeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return true + } + if !f.imports("errors") { + return true + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return true + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = f.render(se.X) + } + p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) + + m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + return true + }) +} + +// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
+func (f *file) lintErrors() { + for _, decl := range f.f.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) + } + } + } +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} + +// lintErrorStrings examines error strings. +// It complains if they are capitalized or end in punctuation or a newline. +func (f *file) lintErrorStrings() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return true + } + if len(ce.Args) < 1 { + return true + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return true + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return true + } + clean, conf := lintErrorString(s) + if clean { + return true + } + + f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), + "error strings should not be capitalized or end with punctuation or a newline") + return true + }) +} + +// lintReceiverNames examines receiver names. It complains about inconsistent +// names used for the same type and names such as "this". 
+func (f *file) lintReceiverNames() { + typeReceiver := map[string]string{} + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return true + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) + return true + } + if name == "this" || name == "self" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + return true + } + recv := receiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) + return true + } + typeReceiver[recv] = name + return true + }) +} + +// lintIncDec examines statements that increment or decrement a variable. +// It complains if they don't use x++ or x--. +func (f *file) lintIncDec() { + f.walk(func(n ast.Node) bool { + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + if len(as.Lhs) != 1 { + return true + } + if !isOne(as.Rhs[0]) { + return true + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return true + } + f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) + return true + }) +} + +// lintErrorReturn examines function declarations that return an error. +// It complains if the error isn't the last parameter. +func (f *file) lintErrorReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return true + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return true + } + if isIdent(ret[len(ret)-1].Type, "error") { + return true + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") + break // only flag one + } + } + return true + }) +} + +// lintUnexportedReturn examines exported function declarations. +// It complains if any return an unexported type. +func (f *file) lintUnexportedReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + if fn.Type.Results == nil { + return false + } + if !fn.Name.IsExported() { + return false + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. + return false + } + } + for _, ret := range fn.Type.Results.List { + typ := f.pkg.typeOf(ret.Type) + if exportedType(typ) { + continue + } + f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), + "exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ) + break // only flag one + } + return false + }) +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. 
+func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + // Builtin types have no package. + return T.Obj().Pkg() == nil || T.Obj().Exported() + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. +var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func (f *file) lintTimeNames() { + f.walk(func(node ast.Node) bool { + v, ok := node.(*ast.ValueSpec) + if !ok { + return true + } + for _, name := range v.Names { + origTyp := f.pkg.typeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. + typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !f.pkg.isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) + } + return true + }) +} + +// lintContextKeyTypes checks for call expressions to context.WithValue with +// basic types used for the key argument. +// See: https://golang.org/issue/17293 +func (f *file) lintContextKeyTypes() { + f.walk(func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + f.checkContextKeyType(node) + } + + return true + }) +} + +// checkContextKeyType reports an error if the call expression calls +// context.WithValue with a key argument of basic type. +func (f *file) checkContextKeyType(x *ast.CallExpr) { + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.pkg.typesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) + } +} + +// lintContextArgs examines function declarations that contain an +// argument with a type of context.Context +// It complains if that argument isn't the first parameter. +func (f *file) lintContextArgs() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return true + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. + for _, arg := range fn.Type.Params.List[1:] { + if isPkgDot(arg.Type, "context", "Context") { + f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") + break // only flag one + } + } + return true + }) +} + +// containsComments returns whether the interval [start, end) contains any +// comments without "// MATCH " prefix. 
+func (f *file) containsComments(start, end token.Pos) bool { + for _, cgroup := range f.f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (f *file) walk(fn func(ast.Node) bool) { + ast.Walk(walker(fn), f.f) +} + +func (f *file) render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func (f *file) debugRender(x interface{}) string { + var buf bytes.Buffer + if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { + panic(err) + } + return buf.String() +} + +// walker adapts a function to satisfy the ast.Visitor interface. +// The function return whether the walk should proceed into the node's children. +type walker func(ast.Node) bool + +func (w walker) Visit(node ast.Node) ast.Visitor { + if w(node) { + return w + } + return nil +} + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// isUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +// scope may be nil. +func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.render(expr) + tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +// firstLineOf renders the given node and returns its first line. +// It will also match the indentation of another node. 
+func (f *file) firstLineOf(node, match ast.Node) string { + line := f.render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return f.indentOf(match) + line +} + +func (f *file) indentOf(node ast.Node) string { + line := srcLine(f.src, f.fset.Position(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} + +func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { + line := srcLine(f.src, f.fset.Position(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} + +// imports returns true if the current file imports the specified package path. +func (f *file) imports(importPath string) bool { + all := astutil.Imports(f.fset, f.f) + for _, p := range all { + for _, i := range p { + uq, err := strconv.Unquote(i.Path.Value) + if err == nil && importPath == uq { + return true + } + } + } + return false +} + +// srcLine returns the complete line at p, including the terminating newline. +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go new file mode 100644 index 00000000..6cd37280 --- /dev/null +++ b/vendor/golang.org/x/mod/module/module.go @@ -0,0 +1,718 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type along with support code. +// +// The module.Version type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably Check, verify that +// a particular path, version pair is valid. +// +// Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. +// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. 
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/mod/semver" + errors "golang.org/x/xerrors" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". + Path string + + // Version is usually a semantic version in canonical form. + // There are three exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. + Version string `json:",omitempty"` +} + +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). 
+func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a ModuleError derived from a Version and error, +// or err itself if it is already such an error. +func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. +func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } + } + _, pathMajor, _ := SplitPathVersion(path) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// pathOK reports whether r can appear in an import path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// This matches what "go get" has historically recognized in import paths. +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see "escaped paths" above). +func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// fileNameOK reports whether r can appear in a file name. 
+// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "escaped paths" above. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by CheckImportPath, +// with two additional constraints. +// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. +func CheckPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed module path %q: %v", path, err) + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("malformed module path %q: leading slash", path) + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("malformed module path %q: missing dot in first path element", path) + } + if path[0] == '-' { + return fmt.Errorf("malformed module path %q: leading dash in first path element", path) + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("malformed module path %q: invalid version", path) + } + return nil +} + +// CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// It must not begin or end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). 
+// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if path[0] == '-' { + return fmt.Errorf("leading dash") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("%q disallowed as path element component on Windows", short) + } + } + return nil +} + +// CheckFilePath checks that a slash-separated file path is valid. +// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckFilePath(path string) error { + if err := checkPath(path, true); err != nil { + return fmt.Errorf("malformed file path %q: %v", path, err) + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. 
+// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' { + dot = true + } + i-- + } + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return nil + } + m := semver.Major(v) + if pathMajor == "" { + if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { + return nil + } + pathMajor = "v0 or v1" + } else if pathMajor[0] == '/' || pathMajor[0] == '.' 
{ + if m == pathMajor[1:] { + return nil + } + pathMajor = pathMajor[1:] + } + return &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), + } +} + +// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. +// An empty PathMajorPrefix allows either v0 or v1. +// +// Note that MatchPathMajor may accept some versions that do not actually begin +// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' +// pathMajor, even though that pathMajor implies 'v1' tagging. +func PathMajorPrefix(pathMajor string) string { + if pathMajor == "" { + return "" + } + if pathMajor[0] != '/' && pathMajor[0] != '.' { + panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") + } + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + m := pathMajor[1:] + if m != semver.Major(m) { + panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") + } + return m +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing Version fields. +// The Version fields are interpreted as semantic versions (using semver.Compare) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// EscapePath returns the escaped form of the given module path. +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return escapeString(path) +} + +// EscapeVersion returns the escaped form of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EscapeVersion(v string) (escaped string, err error) { + if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } + } + return escapeString(v) +} + +func escapeString(s string) (escaped string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the escaping loop below depends on it. 
+ return "", fmt.Errorf("internal error: inconsistency in EscapePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. +func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped module path %q", escaped) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) + } + return path, nil +} + +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped version %q", escaped) + } + if err := checkElem(v, true); err != nil { + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) + } + return v, nil +} + +func unescapeString(escaped string) (string, bool) { + var buf []byte + + bang := false + for _, r := range escaped { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 00000000..2988e3cf --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. 
+func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index f4d9b5ec..3a67636f 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis // dialCall is an in-flight Transport dial call to a host. type dialCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done res *ClientConn // valid after done is closed @@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) } type addConnCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done err error @@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { close(c.done) } -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - // p.mu must be held func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { for _, v := range p.conns[key] { diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index cea601fc..b51f0e0c 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -8,6 +8,8 @@ package http2 // flow is the flow control window's size. type flow struct { + _ incomparable + // n is the number of DATA bytes we're allowed to send. // A flow is kept both on a conn and a per-stream. n int32 diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index b412a96c..a1ab2f05 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return nil } +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() + type node struct { + _ incomparable + // children is non-nil for internal nodes children *[256]*node diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index bdaba1d4..5571ccfd 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,7 +19,6 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" "crypto/tls" - "errors" "fmt" "io" "net/http" @@ -173,11 +172,6 @@ func (s SettingID) String() string { return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) } -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - // validWireHeaderFieldName reports whether v is a valid header field // name (key). See httpguts.ValidHeaderName for the base rules. 
// @@ -247,6 +241,7 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { + _ incomparable w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } @@ -319,6 +314,7 @@ func bodyAllowedForStatus(status int) bool { } type httpError struct { + _ incomparable msg string timeout bool } @@ -382,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) { func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } + +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index de31d72b..345b7cd8 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -581,13 +581,10 @@ type stream struct { cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us state streamState resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen @@ -764,6 +761,7 @@ func (sc *serverConn) readFrames() { // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. type frameWriteResult struct { + _ incomparable wr FrameWriteRequest // what was written (or attempted) err error // result of the writeFrame call } @@ -774,7 +772,7 @@ type frameWriteResult struct { // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} + sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -1164,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { if wr.write.staysWithinBuffer(sc.bw.Available()) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) + sc.wroteFrame(frameWriteResult{wr: wr, err: err}) } else { sc.writingFrameAsync = true go sc.writeFrameAsync(wr) @@ -2060,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var trailer http.Header for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + key = http.CanonicalHeaderKey(textproto.TrimString(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": // Bogus. (copy of http1 rules) @@ -2278,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { // requestBody is the Handler's Request.Body type. // Read and Close may be called concurrently. 
type requestBody struct { + _ incomparable stream *stream conn *serverConn closed bool // for use by Close only diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index d948e96e..76a92e0c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -108,6 +108,19 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // ReadIdleTimeout is the timeout after which a health check using ping + // frame will be carried out if no frame is received on the connection. + // Note that a ping response will is considered a received frame, so if + // there is no other traffic on the connection, the health check will + // be performed every ReadIdleTimeout interval. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to Ping is not received. + // Defaults to 15s. + PingTimeout time.Duration + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } +func (t *Transport) pingTimeout() time.Duration { + if t.PingTimeout == 0 { + return 15 * time.Second + } + return t.PingTimeout + +} + // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. func ConfigureTransport(t1 *http.Transport) error { @@ -675,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return cc, nil } +func (cc *ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received. + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + err := cc.Ping(ctx) + if err != nil { + cc.closeForLostPing() + cc.t.connPool().MarkDead(cc) + return + } +} + func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() @@ -846,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error { return nil } -// Close closes the client connection immediately. -// -// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. -func (cc *ClientConn) Close() error { +// closes the client connection immediately. In-flight requests are interrupted. +// err is sent to streams. +func (cc *ClientConn) closeForError(err error) error { cc.mu.Lock() defer cc.cond.Broadcast() defer cc.mu.Unlock() - err := errors.New("http2: client connection force closed via ClientConn.Close") for id, cs := range cc.streams { select { case cs.resc <- resAndError{err: err}: @@ -866,6 +899,20 @@ func (cc *ClientConn) Close() error { return cc.tconn.Close() } +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + err := errors.New("http2: client connection force closed via ClientConn.Close") + return cc.closeForError(err) +} + +// closes the client connection immediately. In-flight requests are interrupted. 
+func (cc *ClientConn) closeForLostPing() error { + err := errors.New("http2: client connection lost") + return cc.closeForError(err) +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -916,7 +963,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { k = http.CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} + return "", fmt.Errorf("invalid Trailer key %q", k) } keys = append(keys, k) } @@ -1394,13 +1441,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - // requires cc.mu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1616,6 +1656,7 @@ func (cc *ClientConn) writeHeader(name, value string) { } type resAndError struct { + _ incomparable res *http.Response err error } @@ -1663,6 +1704,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { + _ incomparable cc *ClientConn closeWhenIdle bool } @@ -1742,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error { rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse gotReply := false // ever saw a HEADERS reply gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout + var t *time.Timer + if readIdleTimeout != 0 { + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) + defer t.Stop() + } for { f, err := cc.fr.ReadFrame() + if t != nil { + t.Reset(readIdleTimeout) + } if err != nil { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } @@ -1892,7 +1943,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } - header := make(http.Header) + regularFields := f.RegularFields() + strs := make([]string, len(regularFields)) + header := make(http.Header, len(regularFields)) res := &http.Response{ Proto: "HTTP/2.0", ProtoMajor: 2, @@ -1900,7 +1953,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra StatusCode: statusCode, Status: status + " " + http.StatusText(statusCode), } - for _, hf := range f.RegularFields() { + for _, hf := range regularFields { key := http.CanonicalHeaderKey(hf.Name) if key == "Trailer" { t := res.Trailer @@ -1912,7 +1965,18 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra t[http.CanonicalHeaderKey(v)] = nil }) } else { - header[key] = append(header[key], hf.Value) + vv := header[key] + if vv == nil && len(strs) > 0 { + // More than likely this will be a single-element key. + // Most headers aren't multi-valued. + // Set the capacity on strs[0] to 1, so any future append + // won't extend the slice into the other strings. 
+ vv, strs = strs[:1:1], strs[1:] + vv[0] = hf.Value + header[key] = vv + } else { + header[key] = append(vv, hf.Value) + } } } @@ -2198,8 +2262,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { return nil } -var errInvalidTrailers = errors.New("http2: invalid trailers") - func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. @@ -2430,7 +2492,6 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -2469,6 +2530,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { // gzipReader wraps a response body so it can lazily // call gzip.NewReader on the first call to Read type gzipReader struct { + _ incomparable body io.ReadCloser // underlying Response.Body zr *gzip.Reader // lazily-initialized gzip reader zerr error // sticky error diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0f443e69..8cfd6063 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -16,15 +16,16 @@ Or you can manually git clone the repository to See godoc for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) +* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) ## Policy for new packages -We no longer accept new provider-specific packages in this repo. For -defining provider endpoints and provider-specific OAuth2 behavior, we -encourage you to create packages elsewhere. We'll keep the existing -packages for compatibility. +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +package. ## Report Issues / Send Patches diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index aa0d34f1..90657915 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -6,7 +6,7 @@ package oauth2 import ( "errors" - "io" + "log" "net/http" "sync" ) @@ -25,9 +25,6 @@ type Transport struct { // Base is the base RoundTripper used to make HTTP requests. // If nil, http.DefaultTransport is used. Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified } // RoundTrip authorizes and authenticates the request with an @@ -52,35 +49,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { req2 := cloneRequest(req) // per RoundTripper contract token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - // req.Body is assumed to have been closed by the base RoundTripper. + // req.Body is assumed to be closed by the base RoundTripper. 
reqBodyClosed = true - - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil + return t.base().RoundTrip(req2) } -// CancelRequest cancels an in-flight request by closing its connection. +var cancelOnce sync.Once + +// CancelRequest does nothing. It used to be a legacy cancellation mechanism +// but now only it only logs on first use to warn that it's deprecated. +// +// Deprecated: use contexts for cancellation instead. func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } + cancelOnce.Do(func() { + log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts") + }) } func (t *Transport) base() http.RoundTripper { @@ -90,19 +74,6 @@ func (t *Transport) base() http.RoundTripper { return http.DefaultTransport } -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { @@ -116,29 +87,3 @@ func cloneRequest(r *http.Request) *http.Request { } return r2 } - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go new file mode 100644 index 00000000..e07899b9 --- /dev/null +++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeheader contains header declarations for the Go runtime's +// slice and string implementations. +// +// This package allows x/sys to use types equivalent to +// reflect.SliceHeader and reflect.StringHeader without introducing +// a dependency on the (relatively heavy) "reflect" package. +package unsafeheader + +import ( + "unsafe" +) + +// Slice is the runtime representation of a slice. +// It cannot be used safely or portably and its representation may change in a later release. +type Slice struct { + Data unsafe.Pointer + Len int + Cap int +} + +// String is the runtime representation of a string. +// It cannot be used safely or portably and its representation may change in a later release. +type String struct { + Data unsafe.Pointer + Len int +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index eb2f78ae..579d2d73 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -89,7 +89,7 @@ constants. 
Adding new syscall numbers is mostly done by running the build on a sufficiently new installation of the target OS (or updating the source checkouts for the -new build system). However, depending on the OS, you make need to update the +new build system). However, depending on the OS, you may need to update the parsing in mksysnum. ### mksyscall.go @@ -149,10 +149,21 @@ To add a constant, add the header that includes it to the appropriate variable. Then, edit the regex (if necessary) to match the desired constant. Avoid making the regex too broad to avoid matching unintended constants. +### mkmerge.go + +This program is used to extract duplicate const, func, and type declarations +from the generated architecture-specific files listed below, and merge these +into a common file for each OS. + +The merge is performed in the following steps: +1. Construct the set of common code that is idential in all architecture-specific files. +2. Write this common code to the merged file. +3. Remove the common code from all architecture-specific files. + ## Generated files -### `zerror_${GOOS}_${GOARCH}.go` +### `zerrors_${GOOS}_${GOARCH}.go` A file containing all of the system's generated error numbers, error strings, signal numbers, and constants. Generated by `mkerrors.sh` (see above). diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 14e4d5ca..6e5c81ac 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -7,6 +7,7 @@ package unix import ( + "math/bits" "unsafe" ) @@ -79,50 +80,7 @@ func (s *CPUSet) IsSet(cpu int) bool { func (s *CPUSet) Count() int { c := 0 for _, b := range s { - c += onesCount64(uint64(b)) + c += bits.OnesCount64(uint64(b)) } return c } - -// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. -// Once this package can require Go 1.9, we can delete this -// and update the caller to use bits.OnesCount64. -func onesCount64(x uint64) int { - const m0 = 0x5555555555555555 // 01010101 ... - const m1 = 0x3333333333333333 // 00110011 ... - const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - - // Unused in this function, but definitions preserved for - // documentation purposes: - // - // const m3 = 0x00ff00ff00ff00ff // etc. - // const m4 = 0x0000ffff0000ffff - // - // Implementation: Parallel summing of adjacent bits. - // See "Hacker's Delight", Chap. 5: Counting Bits. - // The following pattern shows the general approach: - // - // x = x>>1&(m0&m) + x&(m0&m) - // x = x>>2&(m1&m) + x&(m1&m) - // x = x>>4&(m2&m) + x&(m2&m) - // x = x>>8&(m3&m) + x&(m3&m) - // x = x>>16&(m4&m) + x&(m4&m) - // x = x>>32&(m5&m) + x&(m5&m) - // return int(x) - // - // Masking (& operations) can be left away when there's no - // danger that a field's sum will carry over into the next - // field: Since the result cannot be > 64, 8 bits is enough - // and we can ignore the masks for the shifts by 8 and up. - // Per "Hacker's Delight", the first line can be simplified - // more, but it saves at best one instruction, so we leave - // it alone for clarity. 
- const m = 1<<64 - 1 - x = x>>1&(m0&m) + x&(m0&m) - x = x>>2&(m1&m) + x&(m1&m) - x = (x>>4 + x) & (m2 & m) - x += x >> 8 - x += x >> 16 - x += x >> 32 - return int(x) & (1<<7 - 1) -} diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 6db717de..3cfefed2 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -23,10 +23,6 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 MOV a1+8(FP), A0 MOV a2+16(FP), A1 MOV a3+24(FP), A2 - MOV $0, A3 - MOV $0, A4 - MOV $0, A5 - MOV $0, A6 MOV trap+0(FP), A7 // syscall entry ECALL MOV A0, r1+32(FP) // r1 @@ -44,9 +40,6 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 MOV a1+8(FP), A0 MOV a2+16(FP), A1 MOV a3+24(FP), A2 - MOV ZERO, A3 - MOV ZERO, A4 - MOV ZERO, A5 MOV trap+0(FP), A7 // syscall entry ECALL MOV A0, r1+32(FP) diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go index 6e322969..a178a614 100644 --- a/vendor/golang.org/x/sys/unix/bluetooth_linux.go +++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go @@ -23,6 +23,7 @@ const ( HCI_CHANNEL_USER = 1 HCI_CHANNEL_MONITOR = 2 HCI_CHANNEL_CONTROL = 3 + HCI_CHANNEL_LOGGING = 4 ) // Socketoption Level diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go index c56bc8b0..761db66e 100644 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go @@ -8,6 +8,7 @@ package unix const ( + DLT_HHDLC = 0x79 IFF_SMART = 0x20 IFT_1822 = 0x2 IFT_A12MPPSWITCH = 0x82 @@ -210,13 +211,18 @@ const ( IFT_XETHER = 0x1a IPPROTO_MAXID = 0x34 IPV6_FAITH = 0x1d + IPV6_MIN_MEMBERSHIPS = 0x1f IP_FAITH = 0x16 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MIN_MEMBERSHIPS = 0x1f MAP_NORESERVE = 0x40 MAP_RENAME = 0x20 NET_RT_MAXID = 0x6 RTF_PRCLONING = 0x10000 RTM_OLDADD = 0x9 RTM_OLDDEL = 0xa + RT_CACHING_CONTEXT = 0x1 + RT_NORTREF = 0x2 SIOCADDRT = 0x8030720a SIOCALIFADDR = 0x8118691b SIOCDELRT = 0x8030720b diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go index 3e977117..070f44b6 100644 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go @@ -8,6 +8,7 @@ package unix const ( + DLT_HHDLC = 0x79 IFF_SMART = 0x20 IFT_1822 = 0x2 IFT_A12MPPSWITCH = 0x82 @@ -210,13 +211,18 @@ const ( IFT_XETHER = 0x1a IPPROTO_MAXID = 0x34 IPV6_FAITH = 0x1d + IPV6_MIN_MEMBERSHIPS = 0x1f IP_FAITH = 0x16 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MIN_MEMBERSHIPS = 0x1f MAP_NORESERVE = 0x40 MAP_RENAME = 0x20 NET_RT_MAXID = 0x6 RTF_PRCLONING = 0x10000 RTM_OLDADD = 0x9 RTM_OLDDEL = 0xa + RT_CACHING_CONTEXT = 0x1 + RT_NORTREF = 0x2 SIOCADDRT = 0x8040720a SIOCALIFADDR = 0x8118691b SIOCDELRT = 0x8040720b diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go new file mode 100644 index 00000000..946dcf3f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep +// them here for backwards compatibility. 
+ +package unix + +const ( + DLT_HHDLC = 0x79 + IPV6_MIN_MEMBERSHIPS = 0x1f + IP_MAX_SOURCE_FILTER = 0x400 + IP_MIN_MEMBERSHIPS = 0x1f + RT_CACHING_CONTEXT = 0x1 + RT_NORTREF = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 39c03f1e..4dc53486 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -9,12 +9,11 @@ package unix import "unsafe" // fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux -// systems by flock_linux_32bit.go to be SYS_FCNTL64. +// systems by fcntl_linux_32bit.go to be SYS_FCNTL64. var fcntl64Syscall uintptr = SYS_FCNTL -// FcntlInt performs a fcntl syscall on fd with the provided command and argument. -func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) +func fcntl(fd int, cmd, arg int) (int, error) { + valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg)) var err error if errno != 0 { err = errno @@ -22,6 +21,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) { return int(valptr), err } +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go new file mode 100644 index 00000000..b27be0a0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +// Set adds fd to the set fds. +func (fds *FdSet) Set(fd int) { + fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS)) +} + +// Clear removes fd from the set fds. +func (fds *FdSet) Clear(fd int) { + fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS)) +} + +// IsSet returns whether fd is in the set fds. +func (fds *FdSet) IsSet(fd int) bool { + return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0 +} + +// Zero clears the set fds. +func (fds *FdSet) Zero() { + for i := range fds.Bits { + fds.Bits[i] = 0 + } +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index f121a8d6..3559e5dc 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -6,7 +6,19 @@ package unix -import "runtime" +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // @@ -14,7 +26,7 @@ import "runtime" func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. 
- err := ioctlSetWinsize(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } @@ -24,7 +36,30 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctlSetTermios(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 5a22eca9..ece31e9d 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS exit fi @@ -124,7 +124,7 @@ freebsd_arm) freebsd_arm64) mkerrors="$mkerrors -m64" mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) mkerrors="$mkerrors -m32" @@ -190,6 +190,12 @@ solaris_amd64) mksysnum= mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; +illumos_amd64) + mksyscall="go run mksyscall_solaris.go" + mkerrors= + mksysnum= + mktypes= + ;; *) echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 exit 1 @@ -212,9 +218,16 @@ esac echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; elif [ "$GOOS" == "darwin" ]; then # pre-1.12, direct syscalls - echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; + echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; # 1.12 and later, syscalls via libSystem echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; + # 1.13 and later, syscalls via libSystem (including syscallPtr) + echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; + elif [ "$GOOS" == "illumos" ]; then + # illumos code generation requires a --illumos switch + echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go"; + # illumos implies 
solaris, so solaris code generation is also required + echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go"; else echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 14624b95..780e387e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -44,6 +44,7 @@ includes_AIX=' #include #include #include +#include #include #include #include @@ -60,6 +61,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -80,6 +82,7 @@ includes_Darwin=' includes_DragonFly=' #include #include +#include #include #include #include @@ -102,7 +105,9 @@ includes_FreeBSD=' #include #include #include +#include #include +#include #include #include #include @@ -179,24 +184,36 @@ struct ltchars { #include #include #include +#include #include #include +#include +#include #include #include +#include #include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include #include #include #include @@ -206,26 +223,23 @@ struct ltchars { #include #include #include +#include #include +#include #include #include +#include #include -#include #include #include -#include -#include -#include #include -#include -#include +#include #include -#include +#include +#include +#include #include -#include -#include -#include -#include + #include #include @@ -264,6 +278,16 @@ struct ltchars { #define FS_KEY_DESC_PREFIX "fscrypt:" #define FS_KEY_DESC_PREFIX_SIZE 8 #define FS_MAX_KEY_SIZE 64 + +// The code generator produces -0x1 for (~0), but an unsigned value is necessary +// for the tipc_subscr timeout __u32 field. +#undef TIPC_WAIT_FOREVER +#define TIPC_WAIT_FOREVER 0xffffffff + +// Copied from linux/l2tp.h +// Including linux/l2tp.h here causes conflicts between linux/in.h +// and netinet/in.h included via net/route.h above. 
+#define IPPROTO_L2TP 115 ' includes_NetBSD=' @@ -273,6 +297,7 @@ includes_NetBSD=' #include #include #include +#include #include #include #include @@ -299,6 +324,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -335,6 +361,7 @@ includes_OpenBSD=' includes_SunOS=' #include #include +#include #include #include #include @@ -427,6 +454,7 @@ ccflags="$@" $2 == "XCASE" || $2 == "ALTWERASE" || $2 == "NOKERNINFO" || + $2 == "NFDBITS" || $2 ~ /^PAR/ || $2 ~ /^SIG[^_]/ || $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || @@ -451,13 +479,15 @@ ccflags="$@" $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || $2 ~ /^MODULE_INIT_/ || $2 !~ "NLA_TYPE_MASK" && + $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -465,8 +495,9 @@ ccflags="$@" $2 ~ /^TCSET/ || $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || $2 !~ "RTF_BITS" && - $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || + $2 ~ /^(IFF|IFT|NET_RT|RTM(GRP)?|RTF|RTV|RTA|RTAX)_/ || $2 ~ /^BIOC/ || + $2 ~ /^DIOC/ || $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || @@ -477,7 +508,10 @@ ccflags="$@" $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || $2 ~ /^ALG_/ || - $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ || + $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_VERITY_/ || + $2 ~ /^FSCRYPT_/ || $2 ~ /^GRND_/ || $2 ~ /^RND/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || @@ -504,8 +538,11 @@ ccflags="$@" $2 ~ /^WDIOC_/ || $2 ~ /^NFN/ || $2 ~ /^XDP_/ || + $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || + $2 ~ /^TIPC_/ || + $2 ~ /^DEVLINK_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go b/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go new file mode 100644 index 00000000..5144deec --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +// Round the length of a raw sockaddr up to align it properly. +func cmsgAlignOf(salen int) int { + salign := SizeofPtr + if SizeofPtr == 8 && !supportsABI(_dragonflyABIChangeVersion) { + // 64-bit Dragonfly before the September 2019 ABI changes still requires + // 32-bit aligned access to network subsystem. 
+ salign = 4 + } + return (salen + salign - 1) & ^(salign - 1) +} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 6079eb4a..8bf45705 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -17,7 +17,7 @@ func UnixCredentials(ucred *Ucred) []byte { h.Level = SOL_SOCKET h.Type = SCM_CREDENTIALS h.SetLen(CmsgLen(SizeofUcred)) - *((*Ucred)(cmsgData(h))) = *ucred + *(*Ucred)(h.data(0)) = *ucred return b } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 062bcaba..003916ed 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -9,35 +9,9 @@ package unix import ( - "runtime" "unsafe" ) -// Round the length of a raw sockaddr up to align it properly. -func cmsgAlignOf(salen int) int { - salign := SizeofPtr - - switch runtime.GOOS { - case "aix": - // There is no alignment on AIX. - salign = 1 - case "darwin", "dragonfly", "solaris", "illumos": - // NOTE: It seems like 64-bit Darwin, DragonFly BSD, - // illumos, and Solaris kernels still require 32-bit - // aligned access to network subsystem. - if SizeofPtr == 8 { - salign = 4 - } - case "netbsd", "openbsd": - // NetBSD and OpenBSD armv7 require 64-bit alignment. - if runtime.GOARCH == "arm" { - salign = 8 - } - } - - return (salen + salign - 1) & ^(salign - 1) -} - // CmsgLen returns the value to store in the Len field of the Cmsghdr // structure, taking into account any necessary alignment. func CmsgLen(datalen int) int { @@ -50,8 +24,8 @@ func CmsgSpace(datalen int) int { return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen) } -func cmsgData(h *Cmsghdr) unsafe.Pointer { - return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr))) +func (h *Cmsghdr) data(offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr)) + offset) } // SocketControlMessage represents a socket control message. @@ -94,10 +68,8 @@ func UnixRights(fds ...int) []byte { h.Level = SOL_SOCKET h.Type = SCM_RIGHTS h.SetLen(CmsgLen(datalen)) - data := cmsgData(h) - for _, fd := range fds { - *(*int32)(data) = int32(fd) - data = unsafe.Pointer(uintptr(data) + 4) + for i, fd := range fds { + *(*int32)(h.data(4 * uintptr(i))) = int32(fd) } return b } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go new file mode 100644 index 00000000..7d08dae5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin freebsd linux netbsd openbsd solaris + +package unix + +import ( + "runtime" +) + +// Round the length of a raw sockaddr up to align it properly. +func cmsgAlignOf(salen int) int { + salign := SizeofPtr + + // dragonfly needs to check ABI version at runtime, see cmsgAlignOf in + // sockcmsg_dragonfly.go + switch runtime.GOOS { + case "aix": + // There is no alignment on AIX. + salign = 1 + case "darwin", "illumos", "solaris": + // NOTE: It seems like 64-bit Darwin, Illumos and Solaris + // kernels still require 32-bit aligned access to network + // subsystem. 
+ if SizeofPtr == 8 { + salign = 4 + } + case "netbsd", "openbsd": + // NetBSD and OpenBSD armv7 require 64-bit alignment. + if runtime.GOARCH == "arm" { + salign = 8 + } + } + + return (salen + salign - 1) & ^(salign - 1) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 1aa065f9..9ad8a0d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -350,49 +350,12 @@ func (w WaitStatus) Signal() Signal { func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } -func (w WaitStatus) CoreDump() bool { return w&0x200 != 0 } +func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, // Therefore, the programmer must call dup2 instead of fcntl in this case. 
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index bf05603f..b3c8e330 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 13d4321f..9a6e0241 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 97a8eef6..68605db6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -237,7 +237,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -413,8 +413,6 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e return kevent(kq, change, len(changes), event, len(events), timeout) } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - // sysctlmib translates name to mib number and appends any additional args. func sysctlmib(name string, args ...int) ([]_C_int, error) { // Translate name to mib number. @@ -512,6 +510,23 @@ func SysctlRaw(name string, args ...int) ([]byte, error) { return buf[:n], nil } +func SysctlClockinfo(name string) (*Clockinfo, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofClockinfo) + var ci Clockinfo + if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofClockinfo { + return nil, EIO + } + return &ci, nil +} + //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { @@ -579,8 +594,6 @@ func Futimes(fd int, tv []Timeval) error { return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -//sys fcntl(fd int, cmd int, arg int) (val int, err error) - //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go new file mode 100644 index 00000000..6a15cba6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.12,!go1.13 + +package unix + +import ( + "unsafe" +) + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // To implement this using libSystem we'd need syscall_syscallPtr for + // fdopendir. 
However, syscallPtr was only added in Go 1.13, so we fall + // back to raw syscalls for this func on Go 1.12. + var p unsafe.Pointer + if len(buf) > 0 { + p = unsafe.Pointer(&buf[0]) + } else { + p = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + return n, errnoErr(e1) + } + return n, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go new file mode 100644 index 00000000..dc0befee --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -0,0 +1,108 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.13 + +package unix + +import ( + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fdopendir_trampoline() + +//go:linkname libc_fdopendir libc_fdopendir +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. + // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. + fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + var s []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(&entry) + hdr.Cap = reclen + hdr.Len = reclen + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. 
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 216b4ac9..0cf31acf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -89,7 +89,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } @@ -156,23 +155,6 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( //sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) -func SysctlClockinfo(name string) (*Clockinfo, error) { - mib, err := sysctlmib(name) - if err != nil { - return nil, err - } - - n := uintptr(SizeofClockinfo) - var ci Clockinfo - if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { - return nil, err - } - if n != SizeofClockinfo { - return nil, EIO - } - return &ci, nil -} - //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -334,48 +316,15 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { * Wrapped */ +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + //sys kill(pid int, signum int, posix int) (err error) func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} @@ -474,6 +423,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tp *Timeval) (err error) //sysnb Getuid() (uid int) //sysnb Issetugid() (tainted bool) //sys Kqueue() (fd int, err error) @@ -498,7 +448,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go new file mode 100644 index 00000000..6b223f91 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,386,!go1.12 + +package unix + +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 489726fa..2724e3a5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -18,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) @@ -43,6 +34,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -56,7 +51,6 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go new file mode 100644 index 00000000..68ebd6fa --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,amd64,!go1.12 + +package unix + +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 914b89bd..ce2e0d24 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,8 @@ import ( "syscall" ) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } @@ -18,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) @@ -43,6 +34,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -56,7 +51,6 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go new file mode 100644 index 00000000..0e3f25ac --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,arm,!go1.12 + +package unix + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 4a284cf5..fc17a3f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -8,6 +8,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -16,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) @@ -41,6 +34,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -58,7 +55,3 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go new file mode 100644 index 00000000..01d45040 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,arm64,!go1.12 + +package unix + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 52dcd88f..1e91ddf3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -10,6 +10,10 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } @@ -18,17 +22,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) @@ -43,6 +36,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -60,7 +57,3 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 4b4ae460..f34c86c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -15,6 +15,7 @@ func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // 32-bit only func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) //go:linkname syscall_syscall syscall.syscall //go:linkname syscall_syscall6 syscall.syscall6 @@ -22,6 +23,7 @@ func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, er //go:linkname syscall_syscall9 syscall.syscall9 //go:linkname syscall_rawSyscall syscall.rawSyscall //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 +//go:linkname syscall_syscallPtr syscall.syscallPtr // Find the entry point for f. See comments in runtime/proc.go for the // function of the same name. 
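// Illustrative sketch (editorial addition, not part of the vendored diff): the
// SetIovlen setters added throughout these per-GOOS/GOARCH files exist because
// Msghdr.Iovlen has a different integer type on each platform (for example
// int32 on the darwin/BSD targets, uint32 or uint64 on the various linux
// targets), so portable callers set the iovec count through a method instead
// of assigning the field directly. A minimal stand-in showing the pattern with
// a hypothetical msghdr type; all names below are invented for the example:
package main

import "fmt"

// msghdr32 mimics a platform where the iovec count is a 32-bit field.
type msghdr32 struct{ iovlen int32 }

// SetIovlen hides the platform-specific field width from callers.
func (m *msghdr32) SetIovlen(length int) { m.iovlen = int32(length) }

func main() {
	var m msghdr32
	iovs := make([][]byte, 3)
	m.SetIovlen(len(iovs)) // caller passes a plain int; the setter narrows it
	fmt.Println(m.iovlen)  // 3
}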
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 260a400f..8a195ae5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -12,7 +12,25 @@ package unix -import "unsafe" +import ( + "sync" + "unsafe" +) + +// See version list in https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/param.h +var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// First __DragonFly_version after September 2019 ABI changes +// http://lists.dragonflybsd.org/pipermail/users/2019-September/358280.html +const _dragonflyABIChangeVersion = 500705 + +func supportsABI(ver uint32) bool { + osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) + return osreldate >= ver +} // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { @@ -150,42 +168,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) @@ -325,7 +308,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 9babb31e..a6b4830a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 329d240b..6932e7c2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -201,42 +201,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} @@ -497,8 +462,12 @@ func convertFromDirents11(buf []byte, old []byte) int { dstPos := 0 srcPos := 0 for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - dstDirent := (*Dirent)(unsafe.Pointer(&buf[dstPos])) - srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos])) + var dstDirent Dirent + var srcDirent dirent_freebsd11 + + // If multiple direntries are written, sometimes when we reach the final one, + // we may have cap of old less than size of dirent_freebsd11. + copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) if dstPos+reclen > len(buf) { @@ -514,6 +483,7 @@ func convertFromDirents11(buf []byte, old []byte) int { dstDirent.Pad1 = 0 copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) + copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] for i := range padding { padding[i] = 0 @@ -551,20 +521,10 @@ func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) } -func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - func PtraceGetRegs(pid int, regsout *Reg) (err error) { return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) } -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} - func PtraceLwpEvents(pid int, enable int) (err error) { return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) } @@ -688,7 +648,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 21e03958..72a506dd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) 
{ cmsg.Len = uint32(length) } @@ -50,3 +54,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceGetFsBase(pid int, fsbase *int64) (err error) { + return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) +} + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9c945a65..d5e376ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -50,3 +54,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceGetFsBase(pid int, fsbase *int64) (err error) { + return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) +} + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5cd6243f..4ea45bce 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -50,3 +54,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a3180548..aa5326db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -50,3 +54,9 @@ func 
sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go new file mode 100644 index 00000000..99e62dcd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -0,0 +1,57 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// illumos system calls not present on Solaris. + +// +build amd64,illumos + +package unix + +import "unsafe" + +func bytes2iovec(bs [][]byte) []Iovec { + iovecs := make([]Iovec, len(bs)) + for i, b := range bs { + iovecs[i].SetLen(len(b)) + if len(b) > 0 { + // somehow Iovec.Base on illumos is (*int8), not (*byte) + iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + } else { + iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + } + } + return iovecs +} + +//sys readv(fd int, iovs []Iovec) (n int, err error) + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = readv(fd, iovecs) + return n, err +} + +//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error) + +func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = preadv(fd, iovecs, off) + return n, err +} + +//sys writev(fd int, iovs []Iovec) (n int, err error) + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = writev(fd, iovecs) + return n, err +} + +//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error) + +func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = pwritev(fd, iovecs, off) + return n, err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 637b5017..942a4bbf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -71,6 +71,17 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. +func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // IoctlSetPointerInt performs an ioctl operation which sets an // integer value on fd, using the specified request number. The ioctl // argument is called with a pointer to the integer value, rather than @@ -80,52 +91,18 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) } -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. 
-func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) @@ -798,6 +775,104 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } +// SockaddrTIPC implements the Sockaddr interface for AF_TIPC type sockets. +// For more information on TIPC, see: http://tipc.sourceforge.net/. +type SockaddrTIPC struct { + // Scope is the publication scopes when binding service/service range. + // Should be set to TIPC_CLUSTER_SCOPE or TIPC_NODE_SCOPE. + Scope int + + // Addr is the type of address used to manipulate a socket. Addr must be + // one of: + // - *TIPCSocketAddr: "id" variant in the C addr union + // - *TIPCServiceRange: "nameseq" variant in the C addr union + // - *TIPCServiceName: "name" variant in the C addr union + // + // If nil, EINVAL will be returned when the structure is used. + Addr TIPCAddr + + raw RawSockaddrTIPC +} + +// TIPCAddr is implemented by types that can be used as an address for +// SockaddrTIPC. It is only implemented by *TIPCSocketAddr, *TIPCServiceRange, +// and *TIPCServiceName. 
+type TIPCAddr interface { + tipcAddrtype() uint8 + tipcAddr() [12]byte +} + +func (sa *TIPCSocketAddr) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCSocketAddr{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCSocketAddr) tipcAddrtype() uint8 { return TIPC_SOCKET_ADDR } + +func (sa *TIPCServiceRange) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceRange{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceRange) tipcAddrtype() uint8 { return TIPC_SERVICE_RANGE } + +func (sa *TIPCServiceName) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceName{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceName) tipcAddrtype() uint8 { return TIPC_SERVICE_ADDR } + +func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Addr == nil { + return nil, 0, EINVAL + } + + sa.raw.Family = AF_TIPC + sa.raw.Scope = int8(sa.Scope) + sa.raw.Addrtype = sa.Addr.tipcAddrtype() + sa.raw.Addr = sa.Addr.tipcAddr() + + return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil +} + +// SockaddrL2TPIP implements the Sockaddr interface for IPPROTO_L2TP/AF_INET sockets. +type SockaddrL2TPIP struct { + Addr [4]byte + ConnId uint32 + raw RawSockaddrL2TPIP +} + +func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_INET + sa.raw.Conn_id = sa.ConnId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil +} + +// SockaddrL2TPIP6 implements the Sockaddr interface for IPPROTO_L2TP/AF_INET6 sockets. +type SockaddrL2TPIP6 struct { + Addr [16]byte + ZoneId uint32 + ConnId uint32 + raw RawSockaddrL2TPIP6 +} + +func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_INET6 + sa.raw.Conn_id = sa.ConnId + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -843,30 +918,63 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil case AF_INET: - pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet4) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + + switch proto { + case IPPROTO_L2TP: + pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa)) + sa := new(SockaddrL2TPIP) + sa.ConnId = pp.Conn_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + default: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil } - return sa, nil case AF_INET6: - pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet6) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - sa.ZoneId = pp.Scope_id - for i := 0; 
i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + + switch proto { + case IPPROTO_L2TP: + pp := (*RawSockaddrL2TPIP6)(unsafe.Pointer(rsa)) + sa := new(SockaddrL2TPIP6) + sa.ConnId = pp.Conn_id + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + default: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil } - return sa, nil case AF_VSOCK: pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) @@ -923,6 +1031,27 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } + return sa, nil + case AF_TIPC: + pp := (*RawSockaddrTIPC)(unsafe.Pointer(rsa)) + + sa := &SockaddrTIPC{ + Scope: int(pp.Scope), + } + + // Determine which union variant is present in pp.Addr by checking + // pp.Addrtype. + switch pp.Addrtype { + case TIPC_SERVICE_RANGE: + sa.Addr = (*TIPCServiceRange)(unsafe.Pointer(&pp.Addr)) + case TIPC_SERVICE_ADDR: + sa.Addr = (*TIPCServiceName)(unsafe.Pointer(&pp.Addr)) + case TIPC_SOCKET_ADDR: + sa.Addr = (*TIPCSocketAddr)(unsafe.Pointer(&pp.Addr)) + default: + return nil, EINVAL + } + return sa, nil } return nil, EAFNOSUPPORT @@ -1160,6 +1289,34 @@ func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer) } +// KeyctlRestrictKeyring implements the KEYCTL_RESTRICT_KEYRING command. This +// command limits the set of keys that can be linked to the keyring, regardless +// of keyring permissions. The command requires the "setattr" permission. +// +// When called with an empty keyType the command locks the keyring, preventing +// any further keys from being linked to the keyring. +// +// The "asymmetric" keyType defines restrictions requiring key payloads to be +// DER encoded X.509 certificates signed by keys in another keyring. Restrictions +// for "asymmetric" include "builtin_trusted", "builtin_and_secondary_trusted", +// "key_or_keyring:", and "key_or_keyring::chain". +// +// As of Linux 4.12, only the "asymmetric" keyType defines type-specific +// restrictions. 
+// +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_restrict_keyring.3.html +// http://man7.org/linux/man-pages/man2/keyctl.2.html +func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error { + if keyType == "" { + return keyctlRestrictKeyring(KEYCTL_RESTRICT_KEYRING, ringid) + } + return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction) +} + +//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL + func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr var rsa RawSockaddrAny @@ -1403,8 +1560,12 @@ func PtraceSyscall(pid int, signal int) (err error) { func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } +func PtraceInterrupt(pid int) (err error) { return ptrace(PTRACE_INTERRUPT, pid, 0, 0) } + func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } +func PtraceSeize(pid int) (err error) { return ptrace(PTRACE_SEIZE, pid, 0, 0) } + func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } //sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) @@ -1461,8 +1622,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Acct(path string) (err error) //sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) //sys Adjtimex(buf *Timex) (state int, err error) -//sys Capget(hdr *CapUserHeader, data *CapUserData) (err error) -//sys Capset(hdr *CapUserHeader, data *CapUserData) (err error) +//sysnb Capget(hdr *CapUserHeader, data *CapUserData) (err error) +//sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) @@ -1472,6 +1633,15 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) + +func Dup2(oldfd, newfd int) error { + // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. 
+ if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { + return Dup3(oldfd, newfd, 0) + } + return dup2(oldfd, newfd) +} + //sys Dup3(oldfd int, newfd int, flags int) (err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) @@ -1481,7 +1651,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) -//sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) //sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) //sys FinitModule(fd int, params string, flags int) (err error) @@ -1537,6 +1706,17 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +// PrctlRetInt performs a prctl operation specified by option and further +// optional arguments arg2 through arg5 depending on option. It returns a +// non-negative integer that is returned by the prctl syscall. +func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (int, error) { + ret, _, err := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // issue 1435. // On linux Setuid and Setgid only affects the current thread, not the process. // This does not match what most callers expect so we must return an error @@ -1550,6 +1730,30 @@ func Setgid(uid int) (err error) { return EOPNOTSUPP } +// SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. +// setfsgid(2) will return a non-nil error only if its caller lacks CAP_SETUID capability. +// If the call fails due to other reasons, current fsgid will be returned. +func SetfsgidRetGid(gid int) (int, error) { + return setfsgid(gid) +} + +// SetfsuidRetUid sets fsuid for current thread and returns previous fsuid set. +// setfsgid(2) will return a non-nil error only if its caller lacks CAP_SETUID capability +// If the call fails due to other reasons, current fsuid will be returned. 
+func SetfsuidRetUid(uid int) (int, error) { + return setfsuid(uid) +} + +func Setfsgid(gid int) error { + _, err := setfsgid(gid) + return err +} + +func Setfsuid(uid int) error { + _, err := setfsuid(uid) + return err +} + func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { return signalfd(fd, sigmask, _C__NSIG/8, flags) } @@ -1562,6 +1766,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) +//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) +//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) @@ -1572,6 +1779,123 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ //sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE +//sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV +//sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV +//sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV +//sys pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PWRITEV +//sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 +//sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 + +func bytes2iovec(bs [][]byte) []Iovec { + iovecs := make([]Iovec, len(bs)) + for i, b := range bs { + iovecs[i].SetLen(len(b)) + if len(b) > 0 { + iovecs[i].Base = &b[0] + } else { + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + return iovecs +} + +// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit +// systems, hi will always be 0. On 32-bit systems, offs will be split in half. +// preadv/pwritev chose this calling convention so they don't need to add a +// padding-register for alignment on ARM. 
+func offs2lohi(offs int64) (lo, hi uintptr) { + return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) +} + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + lo, hi := offs2lohi(offset) + n, err = preadv(fd, iovecs, lo, hi) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { + iovecs := bytes2iovec(iovs) + lo, hi := offs2lohi(offset) + n, err = preadv2(fd, iovecs, lo, hi, flags) + readvRacedetect(iovecs, n, err) + return n, err +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + lo, hi := offs2lohi(offset) + n, err = pwritev(fd, iovecs, lo, hi) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + lo, hi := offs2lohi(offset) + n, err = pwritev2(fd, iovecs, lo, hi, flags) + writevRacedetect(iovecs, n) + return n, err +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} // mmap varies by architecture; see syscall_linux_*.go. //sys munmap(addr uintptr, length uintptr) (err error) @@ -1761,6 +2085,17 @@ func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err erro return openByHandleAt(mountFD, handle.fileHandle, flags) } +// Klogset wraps the sys_syslog system call; it sets console_loglevel to +// the value specified by arg and passes a dummy pointer to bufp. +func Klogset(typ int, arg int) (err error) { + var p unsafe.Pointer + _, _, errno := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(p), uintptr(arg)) + if errno != 0 { + return errnoErr(errno) + } + return nil +} + /* * Unimplemented */ @@ -1855,7 +2190,6 @@ func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err erro // TimerGetoverrun // TimerGettime // TimerSettime -// Timerfd // Tkill (obsolete) // Tuxcall // Umount2 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index e2f8cf6e..048d18e3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -49,7 +49,7 @@ func Pipe2(p []int, flags int) (err error) { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). 
-//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 @@ -70,8 +70,8 @@ func Pipe2(p []int, flags int) (err error) { //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 -//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 -//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 +//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 //sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 //sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 //sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 @@ -372,6 +372,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 87a30744..72efe86e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -6,7 +6,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -55,8 +55,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -163,6 +163,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index f6267944..e1913e2c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -80,7 +80,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 @@ -98,8 +98,8 @@ func Seek(fd int, offset int64, 
whence int) (newoffset int64, err error) { //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT -//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 -//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 +//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 //sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 //sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 //sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 @@ -252,6 +252,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cb20b15d..c6de6b91 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -25,7 +25,7 @@ func EpollCreate(size int) (fd int, err error) { //sysnb Getegid() (egid int) //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 @@ -42,12 +42,12 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -168,6 +168,24 @@ func Pipe2(p []int, flags int) (err error) { return } +// Getrlimit prefers the prlimit64 system call. See issue 38604. +func Getrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + return getrlimit(resource, rlim) +} + +// Setrlimit prefers the prlimit64 system call. See issue 38604. 
+func Setrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + return setrlimit(resource, rlim) +} + func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } @@ -180,6 +198,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } @@ -188,9 +210,9 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. +func dup2(oldfd int, newfd int) error func Pause() error { _, err := ppoll(nil, 0, nil, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index b3b21ec1..f0287476 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -36,8 +36,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -208,10 +208,18 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func InotifyInit() (fd int, err error) { + return InotifyInit1(0) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 5144d4e1..c1132811 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -14,7 +14,7 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -31,8 +31,8 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, 
offset *int64, count int) (written int, err error) = SYS_SENDFILE64 -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -220,6 +220,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 0a100b66..34937440 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -34,8 +34,8 @@ package unix //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -91,6 +91,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6230f640..b0b15055 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -41,8 +41,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -179,6 +179,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } @@ -187,10 +191,6 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err @@ -224,3 +224,7 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline 
string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. +func dup2(oldfd int, newfd int) error diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index f81dbdc9..2363f749 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -10,7 +10,7 @@ import ( "unsafe" ) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -34,8 +34,8 @@ import ( //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -120,6 +120,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index b6956561..d389f151 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -8,7 +8,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 @@ -30,8 +30,8 @@ package unix //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -107,6 +107,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5ef30904..45b50a61 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -106,23 +106,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -func SysctlClockinfo(name string) (*Clockinfo, error) { - mib, err := sysctlmib(name) - if err != nil { - return nil, err - } - - n := uintptr(SizeofClockinfo) - var ci Clockinfo - if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { - return nil, err - } - if n != SizeofClockinfo { - return nil, EIO - } - return &ci, nil -} - //sysnb pipe() (fd1 int, fd2 int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -187,42 +170,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget @@ -284,6 +232,14 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Fstatvfs(fd int, buf *Statvfs_t) (err error) { + return Fstatvfs1(fd, buf, ST_WAIT) +} + +func Statvfs(path string, buf *Statvfs_t) (err error) { + return Statvfs1(path, buf, ST_WAIT) +} + /* * Exposed directly */ @@ -297,6 +253,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) +//sys Dup3(from int, to int, flags int) (err error) //sys Exit(code int) //sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) //sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) @@ -322,6 +279,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) = SYS_FSTATVFS1 //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb 
Getegid() (egid int) @@ -365,7 +323,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -378,6 +336,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) //sys Stat(path string, stat *Stat_t) (err error) +//sys Statvfs1(path string, buf *Statvfs_t, flags int) (err error) = SYS_STATVFS1 //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 24f74e58..24da8b52 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 6878bf7f..25a0ac82 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index dbbfcf71..21591ecd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index f3434465..80474963 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 1a074b2f..a266e92a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -55,23 +55,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -func 
SysctlClockinfo(name string) (*Clockinfo, error) { - mib, err := sysctlmib(name) - if err != nil { - return nil, err - } - - n := uintptr(SizeofClockinfo) - var ci Clockinfo - if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { - return nil, err - } - if n != SizeofClockinfo { - return nil, EIO - } - return &ci, nil -} - func SysctlUvmexp(name string) (*Uvmexp, error) { mib, err := sysctlmib(name) if err != nil { @@ -89,16 +72,20 @@ func SysctlUvmexp(name string) (*Uvmexp, error) { return &u, nil } -//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { + return Pipe2(p, 0) +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) +func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL } var pp [2]_C_int - err = pipe(&pp) + err := pipe2(&pp, flags) p[0] = int(pp[0]) p[1] = int(pp[1]) - return + return err } //sys Getdents(fd int, buf []byte) (n int, err error) @@ -178,42 +165,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) @@ -283,6 +235,7 @@ func Uname(uname *Utsname) error { //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) +//sys Dup3(from int, to int, flags int) (err error) //sys Exit(code int) //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) @@ -340,7 +293,7 @@ func Uname(uname *Utsname) error { //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -387,7 +340,6 @@ func Uname(uname *Utsname) error { // clock_settime // closefrom // execve -// fcntl // fhopen // fhstat // fhstatfs diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index d62da60d..42b5a0e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 9a35334c..6ea4b488 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 5d812aae..1c3d26fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 0fb39cf5..a8c458cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -28,6 +28,10 @@ func 
(msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0153a316..0e2a696a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -391,7 +391,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -553,40 +553,10 @@ func Minor(dev uint64) uint32 { //sys ioctl(fd int, req uint, arg uintptr) (err error) -func IoctlSetInt(fd int, req uint, value int) (err error) { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetTermio(fd int, req uint) (*Termio, error) { var value Termio err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -679,7 +649,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 91c32ddf..b22a34d7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -18,6 +18,10 @@ func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 3de37566..400ba9fb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -12,6 +12,8 @@ import ( "sync" "syscall" "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -76,7 +78,7 @@ func SignalName(s syscall.Signal) string { // The signal name should start with "SIG". 
func SignalNum(s string) syscall.Signal { signalNameMapOnce.Do(func() { - signalNameMap = make(map[string]syscall.Signal) + signalNameMap = make(map[string]syscall.Signal, len(signalList)) for _, signal := range signalList { signalNameMap[signal.name] = signal.num } @@ -113,15 +115,12 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, errno } - // Slice memory layout - var sl = struct { - addr uintptr - len int - cap int - }{addr, length, length} - - // Use unsafe to turn sl into a []byte. - b := *(*[]byte)(unsafe.Pointer(&sl)) + // Use unsafe to convert addr into a []byte. + var b []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) + hdr.Data = unsafe.Pointer(addr) + hdr.Cap = length + hdr.Len = length // Register mapping in m and return it. p := &b[cap(b)-1] diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 1def8a58..104994bc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -459,6 +459,15 @@ const ( MAP_SHARED = 0x1 MAP_TYPE = 0xf0 MAP_VARIABLE = 0x0 + MCAST_BLOCK_SOURCE = 0x40 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x3e + MCAST_JOIN_SOURCE_GROUP = 0x42 + MCAST_LEAVE_GROUP = 0x3f + MCAST_LEAVE_SOURCE_GROUP = 0x43 + MCAST_SOURCE_FILTER = 0x49 + MCAST_UNBLOCK_SOURCE = 0x41 MCL_CURRENT = 0x100 MCL_FUTURE = 0x200 MSG_ANY = 0x4 @@ -483,6 +492,7 @@ const ( MS_INVALIDATE = 0x40 MS_PER_SEC = 0x3e8 MS_SYNC = 0x20 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x4000 NL2 = 0x8000 @@ -688,7 +698,7 @@ const ( SIOCGHIWAT = 0x40047301 SIOCGIFADDR = -0x3fd796df SIOCGIFADDRS = 0x2000698c - SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBAUDRATE = -0x3fdf9669 SIOCGIFBRDADDR = -0x3fd796dd SIOCGIFCONF = -0x3ff796bb SIOCGIFCONFGLOB = -0x3ff79670 diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 03187dea..4fc8d306 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -459,6 +459,15 @@ const ( MAP_SHARED = 0x1 MAP_TYPE = 0xf0 MAP_VARIABLE = 0x0 + MCAST_BLOCK_SOURCE = 0x40 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x3e + MCAST_JOIN_SOURCE_GROUP = 0x42 + MCAST_LEAVE_GROUP = 0x3f + MCAST_LEAVE_SOURCE_GROUP = 0x43 + MCAST_SOURCE_FILTER = 0x49 + MCAST_UNBLOCK_SOURCE = 0x41 MCL_CURRENT = 0x100 MCL_FUTURE = 0x200 MSG_ANY = 0x4 @@ -483,6 +492,7 @@ const ( MS_INVALIDATE = 0x40 MS_PER_SEC = 0x3e8 MS_SYNC = 0x20 + NFDBITS = 0x40 NL0 = 0x0 NL1 = 0x4000 NL2 = 0x8000 @@ -688,7 +698,7 @@ const ( SIOCGHIWAT = 0x40047301 SIOCGIFADDR = -0x3fd796df SIOCGIFADDRS = 0x2000698c - SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBAUDRATE = -0x3fdf9669 SIOCGIFBRDADDR = -0x3fd796dd SIOCGIFCONF = -0x3fef96bb SIOCGIFCONFGLOB = -0x3fef9670 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index 3b39d740..6217cdba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -3,7 +3,7 @@ // +build 386,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m32 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 8fe55477..e3ff2ee3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -3,7 +3,7 @@ // +build amd64,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 7a977770..3e417571 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -3,7 +3,7 @@ // +build arm,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 6d56d8a0..cbd8ed18 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -3,7 +3,7 @@ // +build arm64,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index bbe6089b..61304717 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -938,6 +938,7 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_MAXID = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index d2bbaabc..84824587 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -3,7 +3,7 @@ // +build 386,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m32 _const.go package unix @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc144648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x804c6490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc06c648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -379,11 +395,14 @@ const ( DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -393,6 +412,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -406,7 +426,6 @@ const ( DLT_GPRS_LLC = 0xa9 DLT_GSMTAP_ABIS = 0xda DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 @@ -429,6 +448,7 @@ const ( DLT_IPV4 = 0xe4 DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 DLT_JUNIPER_ATM_CEMIC = 0xee @@ -461,8 +481,9 @@ const ( DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MAX = 0x113 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -478,14 +499,16 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 + DLT_PPP_BSDOS = 0xe DLT_PPP_ETHER = 0x33 DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 @@ -496,19 +519,25 @@ const ( DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf + DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -527,10 +556,14 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -548,6 +581,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -555,11 +589,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -576,6 +611,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff 
EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -617,6 +653,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -633,6 +670,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -807,6 +845,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -827,13 +866,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -845,6 +884,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -905,10 +945,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -918,6 +956,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -926,6 +965,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -975,6 +1015,7 @@ const ( MAP_EXCL = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 MAP_HASSEMAPHORE = 0x200 MAP_NOCORE = 0x20000 MAP_NOSYNC = 0x800 @@ -986,6 +1027,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1026,10 +1076,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1055,8 +1107,10 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1211,7 +1265,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1221,15 +1274,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 
@@ -1245,6 +1300,7 @@ const ( SIOCGETSGCNT = 0xc0147210 SIOCGETVIFCNT = 0xc014720f SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 SIOCGIFBRDADDR = 0xc0206923 @@ -1266,8 +1322,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1298,6 +1357,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1316,6 +1376,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1324,6 +1385,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1336,11 +1398,19 @@ const ( SO_RCVTIMEO = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1384,10 +1454,45 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1395,6 +1500,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1410,8 +1521,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + 
TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_SESS_CWV = 0x42a + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1475,6 +1608,8 @@ const ( TIOCTIMESTAMP = 0x40087459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1486,6 +1621,8 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_BCACHE_SIZE_MAX = 0x70e0000 + VM_SWZONE_SIZE_MAX = 0x2280000 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 4f8db783..4acd101c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80506490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc080648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -379,11 +395,14 @@ const ( DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -393,6 +412,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -406,7 +426,6 @@ const ( DLT_GPRS_LLC = 0xa9 DLT_GSMTAP_ABIS = 0xda DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 @@ -429,6 +448,7 @@ const ( DLT_IPV4 = 0xe4 DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 DLT_JUNIPER_ATM_CEMIC = 0xee @@ -461,8 +481,9 @@ const ( DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MAX = 0x113 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -478,14 +499,16 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 + DLT_PPP_BSDOS = 0xe DLT_PPP_ETHER = 0x33 DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 @@ -496,19 +519,25 @@ const ( DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 - 
DLT_SLIP_BSDOS = 0xf + DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -527,10 +556,14 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -548,6 +581,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -555,11 +589,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -576,6 +611,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -617,6 +653,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -633,6 +670,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -807,6 +845,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -827,13 +866,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -845,6 +884,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -905,10 +945,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -918,6 +956,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -926,6 +965,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -976,6 +1016,7 @@ const ( MAP_EXCL = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 MAP_HASSEMAPHORE = 0x200 MAP_NOCORE = 0x20000 MAP_NOSYNC = 0x800 @@ -987,6 +1028,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 
MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1027,10 +1077,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1056,8 +1108,10 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1212,7 +1266,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1222,15 +1275,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1246,6 +1301,7 @@ const ( SIOCGETSGCNT = 0xc0207210 SIOCGETVIFCNT = 0xc028720f SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 SIOCGIFBRDADDR = 0xc0206923 @@ -1267,8 +1323,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1299,6 +1358,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1317,6 +1377,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1325,6 +1386,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1337,11 +1399,19 @@ const ( SO_RCVTIMEO = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1385,10 +1455,45 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + 
TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1396,6 +1501,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1411,8 +1522,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_SESS_CWV = 0x42a + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1476,6 +1609,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 53e5de60..e4719873 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -3,7 +3,7 @@ // +build arm,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go package unix @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc144648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x804c6490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc06c648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -1063,6 +1079,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index d4a192fe..5e49769d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -3,7 +3,7 @@ // +build arm64,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go package unix @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80506490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc080648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -379,11 +395,14 @@ const ( DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -393,6 +412,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -406,7 +426,6 @@ const ( DLT_GPRS_LLC = 0xa9 DLT_GSMTAP_ABIS = 0xda DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 @@ -429,6 +448,7 @@ const ( DLT_IPV4 = 0xe4 DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 DLT_JUNIPER_ATM_CEMIC = 0xee @@ -461,8 +481,9 @@ const ( DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MAX = 0x113 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -478,14 +499,16 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 + DLT_PPP_BSDOS = 0xe DLT_PPP_ETHER = 0x33 DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 @@ -496,19 +519,25 @@ const ( DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf + DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -527,10 +556,14 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -548,6 +581,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -555,11 +589,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -576,6 +611,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff 
EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -617,6 +653,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -633,6 +670,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -807,6 +845,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -827,13 +866,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -845,6 +884,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -905,10 +945,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -918,6 +956,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -926,6 +965,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -976,6 +1016,7 @@ const ( MAP_EXCL = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 MAP_HASSEMAPHORE = 0x200 MAP_NOCORE = 0x20000 MAP_NOSYNC = 0x800 @@ -987,6 +1028,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1027,10 +1077,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1056,8 +1108,10 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1212,7 +1266,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1222,15 +1275,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 
@@ -1246,6 +1301,7 @@ const ( SIOCGETSGCNT = 0xc0207210 SIOCGETVIFCNT = 0xc028720f SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 SIOCGIFBRDADDR = 0xc0206923 @@ -1267,8 +1323,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1299,6 +1358,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1317,6 +1377,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1325,6 +1386,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1337,11 +1399,19 @@ const ( SO_RCVTIMEO = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1385,10 +1455,45 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1396,6 +1501,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1411,8 +1522,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + 
TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_SESS_CWV = 0x42a + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1476,6 +1609,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1487,6 +1622,7 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_BCACHE_SIZE_MAX = 0x19000000 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go new file mode 100644 index 00000000..6e3cfec4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -0,0 +1,2471 @@ +// Code generated by mkmerge.go; DO NOT EDIT. + +// +build linux + +package unix + +import "syscall" + +const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2d + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + AF_XDP = 0x2c + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + 
ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 + B0 = 0x0 + B110 = 0x3 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2400 = 0xb + B300 = 0x7 + B38400 = 0xf + B4800 = 0xc + B50 = 0x1 + B600 = 0x8 + B75 = 0x2 + B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 + BINFMTFS_MAGIC = 0x42494e4d + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 + BPF_ALU = 0x4 + BPF_ALU64 = 0x7 + BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 + BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 + BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 + BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CLONE = 0x200 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_MMAPABLE = 0x400 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REPLACE = 0x4 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 + BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 + BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 + BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MOV = 0xb0 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 + BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + 
BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 + BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XADD = 0xc0 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_J1939 = 0x7 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x8 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 + CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_ARGS_SIZE_VER0 = 0x40 + CLONE_ARGS_SIZE_VER1 = 0x50 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_CLEAR_SIGHAND = 0x100000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWTIME = 0x80 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 + CR0 = 0x0 + CRAMFS_MAGIC = 0x28cd3d45 + CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 + CS5 = 0x0 + CSIGNAL = 0xff + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVLINK_CMD_ESWITCH_MODE_GET = 0x1d + DEVLINK_CMD_ESWITCH_MODE_SET = 0x1e + DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" + 
DEVLINK_GENL_NAME = "devlink" + DEVLINK_GENL_VERSION = 0x1 + DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 + DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECRYPTFS_SUPER_MAGIC = 0xf15f + EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 + EXTA = 0xe + EXTB = 0xf + F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 
0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR = 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 + FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" + FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 + FSCRYPT_KEY_IDENTIFIER_SIZE = 0x10 + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY = 0x1 + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS = 0x2 + FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR = 0x1 + FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER = 0x2 + FSCRYPT_KEY_STATUS_ABSENT = 0x1 + FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF = 0x1 + FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED = 0x3 + FSCRYPT_KEY_STATUS_PRESENT = 0x2 + FSCRYPT_MAX_KEY_SIZE = 0x40 + FSCRYPT_MODE_ADIANTUM = 0x9 + FSCRYPT_MODE_AES_128_CBC = 0x5 + FSCRYPT_MODE_AES_128_CTS = 0x6 + FSCRYPT_MODE_AES_256_CTS = 0x4 + FSCRYPT_MODE_AES_256_XTS = 0x1 + FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2 + FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3 + FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 + FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1 + FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3 + FSCRYPT_POLICY_FLAGS_VALID = 0xf + FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4 + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8 + FSCRYPT_POLICY_V1 = 0x0 + FSCRYPT_POLICY_V2 = 0x2 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 + FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617 + FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a + FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 + FS_IOC_MEASURE_VERITY = 0xc0046686 + FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618 + FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0xf + FS_VERITY_FL = 0x100000 + FS_VERITY_HASH_ALG_SHA256 = 0x1 + FS_VERITY_HASH_ALG_SHA512 = 0x2 + FUTEXFS_SUPER_MAGIC = 0xbad1dea + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 
+ F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_INSECURE = 0x4 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 + IBSHIFT = 0x10 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0xa + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + 
IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MASK_CREATE = 0x10000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_L2TP = 0x73 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_ALL = 0x1d + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + 
IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISOFS_SUPER_MAGIC = 0x9660 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PARISC = 0xf0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 + KEYCTL_NEGATE = 0xd + KEYCTL_PKEY_DECRYPT = 0x1a + KEYCTL_PKEY_ENCRYPT = 0x19 + KEYCTL_PKEY_QUERY = 0x18 + KEYCTL_PKEY_SIGN = 0x1b + KEYCTL_PKEY_VERIFY = 0x1c + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_SUPPORTS_DECRYPT = 0x2 + KEYCTL_SUPPORTS_ENCRYPT = 0x1 + KEYCTL_SUPPORTS_SIGN = 0x4 + KEYCTL_SUPPORTS_VERIFY = 0x8 + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING 
= -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 + MADV_COLD = 0x14 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0x15 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 + MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 + MSDOS_SUPER_MAGIC = 0x4d44 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + 
MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 + NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_GET_STRICT_CHK = 0xc + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x5 + NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + ONLRET = 0x20 + ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 + OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 + O_ACCMODE = 0x3 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + 
PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_IGNORE_OUTGOING = 0x17 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PIPEFS_MAGIC = 0x50495045 + PPC_CMM_MAGIC = 0xc7571590 + PPPIOCGNPMODE = 0xc008744c + PPPIOCNEWUNIT = 0xc004743e + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_IO_FLUSHER = 0x3a + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 + PR_GET_TAGGED_ADDR_CTRL = 0x38 + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_IO_FLUSHER = 0x39 + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 
0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 + PR_SET_TAGGED_ADDR_CTRL = 0x37 + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_INDIRECT_BRANCH = 0x1 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TAGGED_ADDR_ENABLE = 0x1 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 + PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 
+ RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1e + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_IRQF = 0x80 + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_UF = 0x10 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTMGRP_DECnet_IFADDR = 0x1000 + RTMGRP_DECnet_ROUTE = 0x4000 + RTMGRP_IPV4_IFADDR = 0x10 + RTMGRP_IPV4_MROUTE = 0x20 + RTMGRP_IPV4_ROUTE = 0x40 + RTMGRP_IPV4_RULE = 0x80 + RTMGRP_IPV6_IFADDR = 0x100 + RTMGRP_IPV6_IFINFO = 0x800 + RTMGRP_IPV6_MROUTE = 0x200 + RTMGRP_IPV6_PREFIX = 0x20000 + RTMGRP_IPV6_ROUTE = 0x400 + RTMGRP_LINK = 0x1 + RTMGRP_NEIGH = 0x4 + RTMGRP_NOTIFY = 0x2 + RTMGRP_TC = 0x8 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 + RTM_DELLINK = 0x11 + RTM_DELLINKPROP = 0x6d + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_DELVLAN = 0x71 + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_OFFLOAD = 0x4000 + RTM_F_PREFIX = 0x800 + RTM_F_TRAP = 0x8000 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETLINKPROP = 0x6e + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_GETVLAN = 0x72 + RTM_MAX = 0x73 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 + RTM_NEWLINK = 0x10 + RTM_NEWLINKPROP = 0x6c + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 + RTM_NEWNSID = 0x58 + RTM_NEWNVLAN = 0x70 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x19 + RTM_NR_MSGTYPES = 0x64 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 + RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb + RTPROT_KERNEL = 0x2 + 
RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SC_LOG_FLUSH = 0x100000 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_OLD = 0x8906 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b + SOCK_DCCP = 0x6 + SOCK_IOC_TYPE = 0x89 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 
0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOL_XDP = 0x11b + SOMAXCONN = 0x1000 + SO_ATTACH_FILTER = 0x1a + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 + SO_EE_CODE_TXTIME_MISSED = 0x2 + SO_EE_CODE_ZEROCOPY_COPIED = 0x1 + SO_EE_ORIGIN_ICMP = 0x2 + SO_EE_ORIGIN_ICMP6 = 0x3 + SO_EE_ORIGIN_LOCAL = 0x1 + SO_EE_ORIGIN_NONE = 0x0 + SO_EE_ORIGIN_TIMESTAMPING = 0x4 + SO_EE_ORIGIN_TXSTATUS = 0x4 + SO_EE_ORIGIN_TXTIME = 0x6 + SO_EE_ORIGIN_ZEROCOPY = 0x5 + SO_GET_FILTER = 0x1a + SO_NO_CHECK = 0xb + SO_PEERNAME = 0x1c + SO_PRIORITY = 0xc + SO_TIMESTAMP = 0x1d + SO_TIMESTAMP_OLD = 0x1d + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_ATTR_VERITY = 0x100000 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 + SYSFS_MAGIC = 0x62656572 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0xa + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea + TCP_CC_INFO = 0x1a + TCP_CM_INQ = 0x24 + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 + TCP_INFO = 0xb + TCP_INQ = 0x24 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX 
= 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OFF = 0x0 + TCP_REPAIR_OFF_NO_WP = -0x1 + TCP_REPAIR_ON = 0x1 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_TX_DELAY = 0x25 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCP_ZEROCOPY_RECEIVE = 0x23 + TFD_TIMER_ABSTIME = 0x1 + TFD_TIMER_CANCEL_ON_SET = 0x2 + TIMER_ABSTIME = 0x1 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RTS = 0x4 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_AEAD_ALG_NAME = 0x20 + TIPC_AEAD_KEYLEN_MAX = 0x24 + TIPC_AEAD_KEYLEN_MIN = 0x14 + TIPC_AEAD_KEY_SIZE_MAX = 0x48 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODELAY = 0x8a + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff + TMPFS_MAGIC = 0x1021994 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 + TS_COMM_LEN = 0x20 + UDF_SUPER_MAGIC = 0x15013346 + UMOUNT_NOFOLLOW = 0x8 + 
USBDEVICE_SUPER_MAGIC = 0x9fa2 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_LOCAL = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VT0 = 0x0 + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 + XDP_PACKET_HEADROOM = 0x100 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RING_NEED_WAKEUP = 0x1 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 + XDP_USE_NEED_WAKEUP = 0x8 + XDP_ZEROCOPY = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 + XFS_SUPER_MAGIC = 0x58465342 + Z3FOLD_MAGIC = 0x33 + ZONEFS_MAGIC = 0x5a4f4653 + ZSMALLOC_MAGIC = 0x58295829 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + 
EACCES = syscall.Errno(0xd) + EAGAIN = syscall.Errno(0xb) + EBADF = syscall.Errno(0x9) + EBUSY = syscall.Errno(0x10) + ECHILD = syscall.Errno(0xa) + EDOM = syscall.Errno(0x21) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISDIR = syscall.Errno(0x15) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + ENFILE = syscall.Errno(0x17) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOMEM = syscall.Errno(0xc) + ENOSPC = syscall.Errno(0x1c) + ENOTBLK = syscall.Errno(0xf) + ENOTDIR = syscall.Errno(0x14) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EPERM = syscall.Errno(0x1) + EPIPE = syscall.Errno(0x20) + ERANGE = syscall.Errno(0x22) + EROFS = syscall.Errno(0x1e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ETXTBSY = syscall.Errno(0x1a) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) +) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 5213d820..5e974110 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -11,2679 +11,489 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD 
= 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80041270 - BLKBSZSET = 0x40041271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80041272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - 
BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - 
CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - 
ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FP_XSTATE_MAGIC2 = 0x46505845 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xc - F_GETLK64 = 0xc - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - 
F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0xd - F_SETLK64 = 0xd - F_SETLKW = 0xe - F_SETLKW64 = 0xe - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - 
IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP 
= 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 
0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_32BIT = 0x40 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 
0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x4000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x8000 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 
0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80042407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8008743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40087446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x400c744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40087447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - 
PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPXREGS = 0x12 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - 
PTRACE_PEEKSIGINFO = 0x4209
[… remainder of this flattened hunk omitted: the regenerated 32-bit vendor/golang.org/x/sys/unix/zerrors_linux_*.go file drops its long architecture-independent lists of Linux constants, "// Errors" syscall.Errno entries, and "// Signals" syscall.Signal entries, keeping and re-adding only the architecture-specific values …]

diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 39b630cc..47a57fe4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -11,2679 +11,489 @@ package unix

 import "syscall"

 const (
[… the equivalent regenerated constant hunk for zerrors_linux_amd64.go continues below in the same flattened form …]
- PPPIOCATTACH =
0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8010743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40107446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x4010744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40107447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - 
PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ARCH_PRCTL = 0x1e - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPXREGS = 0x12 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETFPXREGS = 0x13 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SINGLEBLOCK = 0x21 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSEMU = 0x1f - PTRACE_SYSEMU_SINGLESTEP = 0x20 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - 
RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8008700d - RTC_EPOCH_SET = 0x4008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8008700b - RTC_IRQP_SET = 0x4008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x80207011 - RTC_PLL_SET = 0x40207012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 
0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - 
SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 
0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 
0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - 
WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 
0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_32BIT = 0x40 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 
0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_ARCH_PRCTL = 0x1e + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 
0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - 
EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2692,23 +502,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2725,8 +527,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2734,99 +534,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - 
EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c59a1beb..df2eea4b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -11,2685 +11,495 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - 
[... auto-generated vendored constants diff elided: this hunk appears to regenerate a golang.org/x/sys/unix zerrors_linux_* file as part of the dependency update, removing the old generated tables of kernel, ioctl, socket, and terminal constants along with the syscall.Errno values and replacing them with the updated generated set ...]
syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 5f35c19d..4e121421 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -11,2670 +11,482 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 
0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80081270 - BLKBSZSET = 0x40081271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80081272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - 
BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - 
CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ESR_MAGIC = 0x45535201 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - 
ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - EXTRA_MAGIC = 0x45585401 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 
0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FPSIMD_MAGIC = 0x46508001 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0x5 - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 
0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - 
IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 
0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - 
MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - 
NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x10000 - O_DIRECTORY = 0x4000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x8000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x404000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - 
PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8010743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40107446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x4010744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40107447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 
- PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8008700d - RTC_EPOCH_SET = 0x4008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8008700b - RTC_IRQP_SET = 0x4008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - 
RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x80207011 - RTC_PLL_SET = 0x40207012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 
0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 
0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SVE_MAGIC = 0x53564501 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - 
S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - 
TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - 
WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 
+ ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + ESR_MAGIC = 0x45535201 + EXTPROC = 0x10000 + EXTRA_MAGIC = 0x45585401 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FPSIMD_MAGIC = 0x46508001 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + 
RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SVE_MAGIC = 0x53564501 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + 
TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2683,23 +495,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = 
syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2716,8 +520,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2725,99 +527,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = 
syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 7f1b7bef..a23b0802 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -11,2681 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - 
B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40041270 - BLKBSZSET = 0x80041271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40041272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - 
BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - 
DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS 
= 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x21 - F_GETLK64 = 0x21 - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x22 - F_SETLK64 = 0x22 - F_SETLKW = 0x23 - F_SETLKW64 = 0x23 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - 
HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 
0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 
- IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - 
MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - 
NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x2000 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - 
PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40042407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4008743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80087446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x800c744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80087447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 
- PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - 
RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4004700d - RTC_EPOCH_SET = 0x8004700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4004700b - RTC_IRQP_SET = 0x8004700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x401c7011 - RTC_PLL_SET = 0x801c7012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - 
RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 
0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - 
SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 
0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x800854d5 - TUNDETACHFILTER = 0x800854d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x400854db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 
0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x20 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 
0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x21 + F_GETLK64 = 0x21 + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x22 + F_SETLK64 = 0x22 + F_SETLKW = 0x23 + F_SETLKW64 = 0x23 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x20 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 
0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 
0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + 
TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x20 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2694,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2707,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2728,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2737,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = 
syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 603d88b8..a5a921e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -11,2681 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - 
AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - 
BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 
0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - 
ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - 
FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xe - F_GETLK64 = 0xe - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - 
IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - 
IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - 
KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 
0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - 
NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x0 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - 
PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - 
PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF 
= 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - 
SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - 
SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 
0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 
0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT 
= 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 
0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x0 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + 
PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 
0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = 
syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2694,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2707,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2728,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2737,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = 
syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ed178f8a..d088e197 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -11,2681 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - 
ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED 
= 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS 
= 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - 
ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xe - F_GETLK64 = 0xe - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - 
F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - 
IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF 
= 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 
0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - 
MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x0 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF 
= 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - 
PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - 
PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 
0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - 
SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 
0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 
0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF 
= 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - 
WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + 
MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x0 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 
0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + 
TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2694,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2707,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = 
syscall.Errno(0xa4) @@ -2728,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2737,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 080b7893..0ddf9d5f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -11,2681 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 
0x42494e4d - BLKBSZGET = 0x40041270 - BLKBSZSET = 0x80041271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40041272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d 
- CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI 
= 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD 
= 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x21 - F_GETLK64 = 0x21 - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x22 - F_SETLK64 = 0x22 - F_SETLKW = 0x23 - F_SETLKW64 = 0x23 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 
0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - 
IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - 
KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - 
MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - 
NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x2000 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40042407 - 
PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4008743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80087446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x800c744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80087447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - 
PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE 
= 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4004700d - RTC_EPOCH_SET = 0x8004700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4004700b - RTC_IRQP_SET = 0x8004700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x401c7011 - RTC_PLL_SET = 0x801c7012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - 
RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 
0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - 
STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - 
TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x800854d5 - TUNDETACHFILTER = 0x800854d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x400854db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 
- VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x20 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - 
XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x21 + F_GETLK64 = 0x21 + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x22 + F_SETLK64 = 0x22 + F_SETLKW = 0x23 + F_SETLKW64 = 0x23 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x20 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 
+ PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + 
SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 
0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x20 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2694,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2707,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2728,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2737,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = 
syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 961e8eab..a93ffc18 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -11,2740 +11,551 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 
0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x17 - B110 = 0x3 - B115200 = 0x11 - B1152000 = 0x18 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x19 - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x1a - B230400 = 0x12 - B2400 = 0xb - B2500000 = 0x1b - B300 = 0x7 - B3000000 = 0x1c - B3500000 = 0x1d - B38400 = 0xf - B4000000 = 0x1e - B460800 = 0x13 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x14 - B57600 = 0x10 - B576000 = 0x15 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x16 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1f - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 
0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0xff - CBAUDEX = 0x0 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0xff0000 - CLOCAL = 0x8000 - 
CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIGNAL = 0xff - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - 
ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - 
FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0xc - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0xd - F_SETLKW = 0x7 - F_SETLKW64 = 0xe - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x4000 - IBSHIFT = 0x10 - ICANON = 0x100 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x400 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - 
IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - 
IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x80 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x1000 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - 
KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x80 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x40 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x2000 - MCL_FUTURE = 0x4000 - MCL_ONFAULT = 0x8000 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - 
MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x300 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80000000 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x4 - ONLCR = 0x2 - ONLRET = 0x20 - ONOCR = 0x10 - 
OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x20000 - O_DIRECTORY = 0x4000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x8000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x404000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x1000 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 
0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_SAO = 0x10 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETEVRREGS = 0x14 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - 
PTRACE_GETREGS64 = 0x16 - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GETVRREGS = 0x12 - PTRACE_GETVSRREGS = 0x1b - PTRACE_GET_DEBUGREG = 0x19 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETEVRREGS = 0x15 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGS64 = 0x17 - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SETVRREGS = 0x13 - PTRACE_SETVSRREGS = 0x1c - PTRACE_SET_DEBUGREG = 0x1a - PTRACE_SINGLEBLOCK = 0x100 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSEMU = 0x1d - PTRACE_SYSEMU_SINGLESTEP = 0x1e - PTRACE_TRACEME = 0x0 - PT_CCR = 0x26 - PT_CTR = 0x23 - PT_DAR = 0x29 - PT_DSCR = 0x2c - PT_DSISR = 0x2a - PT_FPR0 = 0x30 - PT_FPSCR = 0x50 - PT_LNK = 0x24 - PT_MSR = 0x21 - PT_NIP = 0x20 - PT_ORIG_R3 = 0x22 - PT_R0 = 0x0 - PT_R1 = 0x1 - PT_R10 = 0xa - PT_R11 = 0xb - PT_R12 = 0xc - PT_R13 = 0xd - PT_R14 = 0xe - PT_R15 = 0xf - PT_R16 = 0x10 - PT_R17 = 0x11 - PT_R18 = 0x12 - PT_R19 = 0x13 - PT_R2 = 0x2 - PT_R20 = 0x14 - PT_R21 = 0x15 - PT_R22 = 0x16 - PT_R23 = 0x17 - PT_R24 = 0x18 - PT_R25 = 0x19 - PT_R26 = 0x1a - PT_R27 = 0x1b - PT_R28 = 0x1c - PT_R29 = 0x1d - PT_R3 = 0x3 - PT_R30 = 0x1e - PT_R31 = 0x1f - PT_R4 = 0x4 - PT_R5 = 0x5 - PT_R6 = 0x6 - PT_R7 = 0x7 - PT_R8 = 0x8 - PT_R9 = 0x9 - PT_REGS_COUNT = 0x2c - PT_RESULT = 0x2b - PT_SOFTE = 0x27 - PT_TRAP = 0x28 - PT_VR0 = 0x52 - PT_VRSAVE = 0x94 - PT_VSCR = 0x93 - PT_VSR0 = 0x96 - PT_VSR31 = 0xd4 - PT_XER = 0x25 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - 
RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - 
RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 
- SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x14 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x15 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x10 - SO_RCVTIMEO = 0x12 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x12 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x11 - SO_SNDTIMEO = 0x13 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x13 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 
0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0xc00 - TABDLY = 0xc00 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x2000741f - TCGETA = 0x40147417 - TCGETS = 0x402c7413 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x2000741d - TCSBRKP = 0x5425 - TCSETA = 0x80147418 - TCSETAF = 0x8014741c - TCSETAW = 0x80147419 - TCSETS = 0x802c7414 - TCSETSF = 0x802c7416 - TCSETSW = 0x802c7415 - TCXONC = 0x2000741e - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x40045432 - TIOCGETC = 0x40067412 - TIOCGETD = 0x5424 - TIOCGETP = 0x40067408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGLTC = 0x40067474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x4004667f - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_LOOP = 0x8000 - TIOCM_OUT1 = 0x2000 - TIOCM_OUT2 = 0x4000 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - 
TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETC = 0x80067411 - TIOCSETD = 0x5423 - TIOCSETN = 0x8006740a - TIOCSETP = 0x80067409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSLTC = 0x80067475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTART = 0x2000746e - TIOCSTI = 0x5412 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x400000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0x10 - VEOF = 0x4 - VEOL = 0x6 - VEOL2 = 0x8 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x5 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xb - VSTART = 0xd - VSTOP = 0xe - VSUSP = 0xc - VSWTC = 0x9 - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x7 - VWERASE = 0xa 
- WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4000 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0xc00 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x17 + B115200 = 0x11 + B1152000 = 0x18 + B1500000 = 0x19 + B2000000 = 0x1a + B230400 = 0x12 + B2500000 = 0x1b + B3000000 = 0x1c + B3500000 = 0x1d + B4000000 = 0x1e + B460800 = 0x13 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B921600 = 0x16 + BLKBSZGET = 
0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTOPB = 0x400 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000000 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x4000 + ICANON = 0x100 + IEXTEN = 0x400 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x80 + IUCLC = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + NFDBITS = 0x40 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x4 + ONLCR = 0x2 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x1000 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + 
+	[... regenerated ioctl, socket option, terminal, and ptrace register constants omitted; auto-generated vendor file ...]
 )

 // Errors
 const (
 	[... errno constant changes omitted; auto-generated vendor file ...]
 )

 // Signals
 const (
 	[... signal constant changes omitted; auto-generated vendor file ...]
 )
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 6e0538f2..c1ea48b9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -11,2740 +11,551 @@ package unix

 import "syscall"

 const (
-	[... removed linux/ppc64le constant definitions omitted; auto-generated vendor file ...]
= 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0xc00 - TABDLY = 0xc00 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x2000741f - TCGETA = 0x40147417 - TCGETS = 0x402c7413 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x2000741d - TCSBRKP = 0x5425 - TCSETA = 0x80147418 - TCSETAF = 0x8014741c - TCSETAW = 0x80147419 - TCSETS = 0x802c7414 - TCSETSF = 0x802c7416 - TCSETSW = 0x802c7415 - TCXONC = 0x2000741e - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x40045432 - TIOCGETC = 0x40067412 - TIOCGETD = 0x5424 - TIOCGETP = 0x40067408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGLTC = 0x40067474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - 
TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x4004667f - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_LOOP = 0x8000 - TIOCM_OUT1 = 0x2000 - TIOCM_OUT2 = 0x4000 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETC = 0x80067411 - TIOCSETD = 0x5423 - TIOCSETN = 0x8006740a - TIOCSETP = 0x80067409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSLTC = 0x80067475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTART = 0x2000746e - TIOCSTI = 0x5412 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x400000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - 
V9FS_MAGIC = 0x1021997 - VDISCARD = 0x10 - VEOF = 0x4 - VEOL = 0x6 - VEOL2 = 0x8 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x5 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xb - VSTART = 0xd - VSTOP = 0xe - VSUSP = 0xc - VSWTC = 0x9 - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x7 - VWERASE = 0xa - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4000 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - 
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0xc00 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x17 + B115200 = 0x11 + B1152000 = 0x18 + B1500000 = 0x19 + B2000000 = 0x1a + B230400 = 0x12 + B2500000 = 0x1b + B3000000 = 0x1c + B3500000 = 0x1d + B4000000 = 0x1e + B460800 = 0x13 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B921600 = 0x16 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTOPB = 0x400 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000000 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x4000 + ICANON = 0x100 + IEXTEN = 0x400 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x80 + IUCLC = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + NFDBITS = 0x40 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x4 + ONLCR = 0x2 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x1000 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 
0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PROT_SAO = 0x10 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS64 = 0x16 + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETREGS64 = 0x17 + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SYSEMU = 0x1d + PTRACE_SYSEMU_SINGLESTEP = 0x1e + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPSCR = 0x50 + PT_LNK = 0x24 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_SOFTE = 0x27 + PT_TRAP = 0x28 + PT_VR0 = 0x52 + PT_VRSAVE = 0x94 + PT_VSCR = 0x93 + PT_VSR0 = 0x96 + PT_VSR31 = 0xd4 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN 
= 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TUNATTACHFILTER = 
0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VMIN = 0x5 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4000 + XTABS = 0xc00 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2753,23 +564,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x3a) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2786,8 +589,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2795,99 +596,67 @@ 
const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 06c0148c..7def950b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -11,2666 +11,476 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 
0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80081270 - BLKBSZSET = 0x40081271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80081272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - 
BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - 
CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - 
EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD 
= -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0x5 - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 
0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 
0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - 
 [generated syscall constant, errno, and signal listings in this vendored zerrors_linux_*.go diff]
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 39875095..d39293c8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -11,2739 +11,549 @@ package unix
 import "syscall"
 const (
 [generated syscall constant listings removed from zerrors_linux_s390x.go]
HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - 
IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - 
KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - 
MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 
0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x4000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - 
PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8010743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40107446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x4010744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40107447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - 
PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_DISABLE_TE = 0x5010 - PTRACE_ENABLE_TE = 0x5009 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_LAST_BREAK = 0x5006 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_AREA = 0x5003 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_AREA = 0x5002 - PTRACE_PEEKUSR = 0x3 - PTRACE_PEEKUSR_AREA = 0x5000 - PTRACE_PEEK_SYSTEM_CALL = 0x5007 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_AREA = 0x5005 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_AREA = 0x5004 - PTRACE_POKEUSR = 0x6 - PTRACE_POKEUSR_AREA = 0x5001 - PTRACE_POKE_SYSTEM_CALL = 0x5008 - PTRACE_PROT = 0x15 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLEBLOCK = 0xc - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_TE_ABORT_RAND = 0x5011 - PTRACE_TRACEME = 0x0 - PT_ACR0 = 0x90 - PT_ACR1 = 0x94 - PT_ACR10 = 0xb8 - PT_ACR11 = 0xbc - PT_ACR12 = 0xc0 - PT_ACR13 = 0xc4 - PT_ACR14 = 0xc8 - PT_ACR15 = 0xcc - PT_ACR2 = 0x98 - PT_ACR3 = 0x9c - PT_ACR4 = 0xa0 - PT_ACR5 = 0xa4 - PT_ACR6 = 0xa8 - PT_ACR7 = 0xac - PT_ACR8 = 0xb0 - PT_ACR9 = 0xb4 - PT_CR_10 = 0x168 - PT_CR_11 = 0x170 - PT_CR_9 = 0x160 - PT_ENDREGS = 0x1af - PT_FPC = 0xd8 - PT_FPR0 = 0xe0 - PT_FPR1 = 0xe8 - PT_FPR10 = 0x130 - PT_FPR11 = 
0x138 - PT_FPR12 = 0x140 - PT_FPR13 = 0x148 - PT_FPR14 = 0x150 - PT_FPR15 = 0x158 - PT_FPR2 = 0xf0 - PT_FPR3 = 0xf8 - PT_FPR4 = 0x100 - PT_FPR5 = 0x108 - PT_FPR6 = 0x110 - PT_FPR7 = 0x118 - PT_FPR8 = 0x120 - PT_FPR9 = 0x128 - PT_GPR0 = 0x10 - PT_GPR1 = 0x18 - PT_GPR10 = 0x60 - PT_GPR11 = 0x68 - PT_GPR12 = 0x70 - PT_GPR13 = 0x78 - PT_GPR14 = 0x80 - PT_GPR15 = 0x88 - PT_GPR2 = 0x20 - PT_GPR3 = 0x28 - PT_GPR4 = 0x30 - PT_GPR5 = 0x38 - PT_GPR6 = 0x40 - PT_GPR7 = 0x48 - PT_GPR8 = 0x50 - PT_GPR9 = 0x58 - PT_IEEE_IP = 0x1a8 - PT_LASTOFF = 0x1a8 - PT_ORIGGPR2 = 0xd0 - PT_PSWADDR = 0x8 - PT_PSWMASK = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8008700d - RTC_EPOCH_SET = 0x4008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8008700b - RTC_IRQP_SET = 0x4008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x80207011 - RTC_PLL_SET = 0x40207012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 
- RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 
- SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - 
SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - 
TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE 
= 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - 
WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + 
NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_DISABLE_TE = 0x5010 + PTRACE_ENABLE_TE = 0x5009 + PTRACE_GET_LAST_BREAK = 0x5006 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_AREA = 0x5003 + PTRACE_PEEKTEXT_AREA = 0x5002 + PTRACE_PEEKUSR_AREA = 0x5000 + PTRACE_PEEK_SYSTEM_CALL = 0x5007 + PTRACE_POKEDATA_AREA = 0x5005 + PTRACE_POKETEXT_AREA = 0x5004 + PTRACE_POKEUSR_AREA = 0x5001 + PTRACE_POKE_SYSTEM_CALL = 0x5008 + PTRACE_PROT = 0x15 + PTRACE_SINGLEBLOCK = 0xc + PTRACE_TE_ABORT_RAND = 0x5011 + PT_ACR0 = 0x90 + PT_ACR1 = 0x94 + PT_ACR10 = 0xb8 + PT_ACR11 = 0xbc + PT_ACR12 = 0xc0 + PT_ACR13 = 0xc4 + PT_ACR14 = 0xc8 + PT_ACR15 = 0xcc + PT_ACR2 = 0x98 + PT_ACR3 = 0x9c + PT_ACR4 = 0xa0 + PT_ACR5 = 0xa4 + PT_ACR6 = 0xa8 + PT_ACR7 = 0xac + PT_ACR8 = 0xb0 + PT_ACR9 = 0xb4 + PT_CR_10 = 0x168 + PT_CR_11 = 0x170 + PT_CR_9 = 0x160 + PT_ENDREGS = 0x1af + PT_FPC = 0xd8 + PT_FPR0 = 0xe0 + PT_FPR1 = 0xe8 + PT_FPR10 = 0x130 + PT_FPR11 = 0x138 + PT_FPR12 = 0x140 + PT_FPR13 = 0x148 + PT_FPR14 = 0x150 + PT_FPR15 = 0x158 + PT_FPR2 = 0xf0 + PT_FPR3 = 0xf8 + PT_FPR4 = 0x100 + PT_FPR5 = 0x108 + PT_FPR6 = 0x110 + PT_FPR7 = 0x118 + PT_FPR8 = 0x120 + PT_FPR9 = 0x128 + PT_GPR0 = 0x10 + PT_GPR1 = 0x18 + PT_GPR10 = 0x60 + PT_GPR11 = 0x68 + PT_GPR12 = 0x70 + PT_GPR13 = 0x78 + PT_GPR14 = 0x80 + PT_GPR15 = 0x88 + PT_GPR2 = 0x20 + PT_GPR3 = 0x28 + PT_GPR4 = 0x30 + PT_GPR5 = 0x38 + PT_GPR6 = 0x40 + PT_GPR7 = 0x48 + PT_GPR8 = 0x50 + PT_GPR9 = 0x58 + PT_IEEE_IP = 0x1a8 + PT_LASTOFF = 0x1a8 + PT_ORIGGPR2 = 0xd0 + PT_PSWADDR = 0x8 + PT_PSWMASK = 0x0 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + 
RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 
0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = 
syscall.Errno(0x67) @@ -2752,23 +562,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2785,8 +587,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2794,99 +594,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = 
syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 8d80f99b..3ff3ec68 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -11,2729 +11,540 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID 
= 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - ASI_LEON_DFLUSH = 0x11 - ASI_LEON_IFLUSH = 0x10 - ASI_LEON_MMUFLUSH = 0x18 - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - 
BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - 
CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x400000 - EFD_NONBLOCK = 0x4000 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - EMT_TAGOVF = 0x1 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x400000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE 
= 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x7 - F_GETLK64 = 0x7 - F_GETOWN = 0x5 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x1 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x8 - F_SETLK64 = 0x8 - F_SETLKW = 0x9 - F_SETLKW64 = 0x9 - F_SETOWN = 0x6 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x3 - F_WRLCK = 0x2 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 
0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x400000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x4000 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 
- IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - 
IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 
0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x200 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x100 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x40 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x2000 - MCL_FUTURE = 0x4000 - MCL_ONFAULT = 0x8000 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 
- NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x400000 - O_CREAT = 0x200 - O_DIRECT = 0x100000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x2000 - O_EXCL = 0x800 - O_FSYNC = 0x802000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x4004 - O_NOATIME = 0x200000 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x4000 - O_PATH = 0x1000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x802000 - O_SYNC = 0x802000 - O_TMPFILE = 0x2010000 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - 
PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - 
PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPAREGS = 0x14 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPREGS64 = 0x19 - PTRACE_GETREGS = 0xc - PTRACE_GETREGS64 = 0x16 - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_READDATA = 0x10 - PTRACE_READTEXT = 0x12 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPAREGS = 0x15 - PTRACE_SETFPREGS = 0xf - PTRACE_SETFPREGS64 = 0x1a - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGS64 = 0x17 - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLESTEP = 0x9 - PTRACE_SPARC_DETACH = 0xb - PTRACE_SYSCALL = 0x18 - PTRACE_TRACEME = 0x0 - PTRACE_WRITEDATA = 0x11 - PTRACE_WRITETEXT = 0x13 - PT_FP = 0x48 - PT_G0 = 0x10 - PT_G1 = 0x14 - PT_G2 = 0x18 - PT_G3 = 0x1c - PT_G4 = 0x20 - PT_G5 = 
0x24 - PT_G6 = 0x28 - PT_G7 = 0x2c - PT_I0 = 0x30 - PT_I1 = 0x34 - PT_I2 = 0x38 - PT_I3 = 0x3c - PT_I4 = 0x40 - PT_I5 = 0x44 - PT_I6 = 0x48 - PT_I7 = 0x4c - PT_NPC = 0x8 - PT_PC = 0x4 - PT_PSR = 0x0 - PT_REGS_MAGIC = 0x57ac6c00 - PT_TNPC = 0x90 - PT_TPC = 0x88 - PT_TSTATE = 0x80 - PT_V9_FP = 0x70 - PT_V9_G0 = 0x0 - PT_V9_G1 = 0x8 - PT_V9_G2 = 0x10 - PT_V9_G3 = 0x18 - PT_V9_G4 = 0x20 - PT_V9_G5 = 0x28 - PT_V9_G6 = 0x30 - PT_V9_G7 = 0x38 - PT_V9_I0 = 0x40 - PT_V9_I1 = 0x48 - PT_V9_I2 = 0x50 - PT_V9_I3 = 0x58 - PT_V9_I4 = 0x60 - PT_V9_I5 = 0x68 - PT_V9_I6 = 0x70 - PT_V9_I7 = 0x78 - PT_V9_MAGIC = 0x9c - PT_V9_TNPC = 0x90 - PT_V9_TPC = 0x88 - PT_V9_TSTATE = 0x80 - PT_V9_Y = 0x98 - PT_WIM = 0x10 - PT_Y = 0xc - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x6 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - 
RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x23 - SCM_TIMESTAMPING_OPT_STATS = 0x38 - SCM_TIMESTAMPING_PKTINFO = 0x3c - SCM_TIMESTAMPNS = 0x21 - SCM_TXTIME = 0x3f - SCM_WIFI_STATUS = 0x25 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x400000 - SFD_NONBLOCK = 0x4000 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - 
SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x400000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x4000 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x8000 - SO_ATTACH_BPF = 0x34 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x35 - SO_ATTACH_REUSEPORT_EBPF = 0x36 - SO_BINDTODEVICE = 0xd - SO_BINDTOIFINDEX = 0x41 - SO_BPF_EXTENSIONS = 0x32 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0x400 - SO_BUSY_POLL = 0x30 - SO_CNX_ADVICE = 0x37 - SO_COOKIE = 0x3b - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x33 - 
SO_INCOMING_NAPI_ID = 0x3a - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x28 - SO_MARK = 0x22 - SO_MAX_PACING_RATE = 0x31 - SO_MEMINFO = 0x39 - SO_NOFCS = 0x27 - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x2 - SO_PASSSEC = 0x1f - SO_PEEK_OFF = 0x26 - SO_PEERCRED = 0x40 - SO_PEERGROUPS = 0x3d - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x100b - SO_RCVLOWAT = 0x800 - SO_RCVTIMEO = 0x2000 - SO_RCVTIMEO_NEW = 0x44 - SO_RCVTIMEO_OLD = 0x2000 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x24 - SO_SECURITY_AUTHENTICATION = 0x5001 - SO_SECURITY_ENCRYPTION_NETWORK = 0x5004 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002 - SO_SELECT_ERR_QUEUE = 0x29 - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x100a - SO_SNDLOWAT = 0x1000 - SO_SNDTIMEO = 0x4000 - SO_SNDTIMEO_NEW = 0x45 - SO_SNDTIMEO_OLD = 0x4000 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x23 - SO_TIMESTAMPING_NEW = 0x43 - SO_TIMESTAMPING_OLD = 0x23 - SO_TIMESTAMPNS = 0x21 - SO_TIMESTAMPNS_NEW = 0x42 - SO_TIMESTAMPNS_OLD = 0x21 - SO_TIMESTAMP_NEW = 0x46 - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3f - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x25 - SO_ZEROCOPY = 0x3e - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x20005407 - TCGETA = 0x40125401 - TCGETS = 0x40245408 - TCGETS2 = 0x402c540c - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - 
TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x20005405 - TCSBRKP = 0x5425 - TCSETA = 0x80125402 - TCSETAF = 0x80125404 - TCSETAW = 0x80125403 - TCSETS = 0x80245409 - TCSETS2 = 0x802c540d - TCSETSF = 0x8024540b - TCSETSF2 = 0x802c540f - TCSETSW = 0x8024540a - TCSETSW2 = 0x802c540e - TCXONC = 0x20005406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x2000747a - TIOCCONS = 0x20007424 - TIOCEXCL = 0x2000740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x40047400 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x40285443 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x40047483 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40047486 - TIOCGPTPEER = 0x20007489 - TIOCGRS485 = 0x40205441 - TIOCGSERIAL = 0x541e - TIOCGSID = 0x40047485 - TIOCGSOFTCAR = 0x40047464 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x4004667f - TIOCLINUX = 0x541c - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGET = 0x4004746a - TIOCMIWAIT = 0x545c - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007484 - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSETD = 0x80047401 - TIOCSIG = 0x80047488 - TIOCSISO7816 = 0xc0285444 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x80047482 - TIOCSPTLCK = 0x80047487 - TIOCSRS485 = 0xc0205442 - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x80047465 - TIOCSTART = 0x2000746e - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x20005437 - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - 
TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - 
WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - ZSMALLOC_MAGIC = 0x58295829 - __TIOCFLUSH = 0x80047410 + ASI_LEON_DFLUSH = 0x11 + ASI_LEON_IFLUSH = 0x10 + ASI_LEON_MMUFLUSH = 0x18 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x400000 + EFD_NONBLOCK = 0x4000 + EMT_TAGOVF = 0x1 + EPOLL_CLOEXEC = 0x400000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x7 + F_GETLK64 = 0x7 + F_GETOWN = 0x5 + F_RDLCK = 0x1 + F_SETLK = 0x8 + F_SETLK64 = 0x8 + F_SETLKW = 0x9 + F_SETLKW64 = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x3 + F_WRLCK = 0x2 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 
0x8000 + IN_CLOEXEC = 0x400000 + IN_NONBLOCK = 0x4000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x200 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x100 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_RENAME = 0x20 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x100000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x2000 + O_EXCL = 0x800 + O_FSYNC = 0x802000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x4004 + O_NOATIME = 0x200000 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x4000 + O_PATH = 0x1000000 + O_RSYNC = 0x802000 + O_SYNC = 0x802000 + O_TMPFILE = 0x2010000 + O_TRUNC = 0x400 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFPAREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPREGS64 = 0x19 + PTRACE_GETREGS64 = 0x16 + PTRACE_READDATA = 0x10 + PTRACE_READTEXT = 0x12 + PTRACE_SETFPAREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPREGS64 = 0x1a + PTRACE_SETREGS64 = 0x17 + PTRACE_SPARC_DETACH = 0xb + PTRACE_WRITEDATA = 0x11 + PTRACE_WRITETEXT = 0x13 + PT_FP = 0x48 + PT_G0 = 0x10 + PT_G1 = 0x14 + PT_G2 = 0x18 + PT_G3 = 0x1c + PT_G4 = 0x20 + PT_G5 = 0x24 + PT_G6 = 0x28 + PT_G7 = 0x2c + PT_I0 = 0x30 + PT_I1 = 0x34 + PT_I2 = 0x38 + PT_I3 = 0x3c + PT_I4 = 0x40 + PT_I5 = 0x44 + PT_I6 = 0x48 + PT_I7 = 0x4c + PT_NPC = 0x8 + PT_PC = 0x4 + PT_PSR = 0x0 + PT_REGS_MAGIC = 0x57ac6c00 + PT_TNPC = 0x90 + PT_TPC = 0x88 + PT_TSTATE = 0x80 + PT_V9_FP = 0x70 + PT_V9_G0 = 0x0 + PT_V9_G1 = 0x8 + PT_V9_G2 = 0x10 + PT_V9_G3 = 0x18 + PT_V9_G4 = 0x20 + PT_V9_G5 = 0x28 + PT_V9_G6 = 0x30 + PT_V9_G7 = 0x38 + PT_V9_I0 = 0x40 + PT_V9_I1 = 0x48 + PT_V9_I2 = 0x50 + PT_V9_I3 = 0x58 + PT_V9_I4 = 0x60 + PT_V9_I5 = 0x68 
+ PT_V9_I6 = 0x70 + PT_V9_I7 = 0x78 + PT_V9_MAGIC = 0x9c + PT_V9_TNPC = 0x90 + PT_V9_TPC = 0x88 + PT_V9_TSTATE = 0x80 + PT_V9_Y = 0x98 + PT_WIM = 0x10 + PT_Y = 0xc + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x6 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x23 + SCM_TIMESTAMPING_OPT_STATS = 0x38 + SCM_TIMESTAMPING_PKTINFO = 0x3c + SCM_TIMESTAMPNS = 0x21 + SCM_TXTIME = 0x3f + SCM_WIFI_STATUS = 0x25 + SFD_CLOEXEC = 0x400000 + SFD_NONBLOCK = 0x4000 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x400000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x4000 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x8000 + SO_ATTACH_BPF = 0x34 + SO_ATTACH_REUSEPORT_CBPF = 0x35 + SO_ATTACH_REUSEPORT_EBPF = 0x36 + SO_BINDTODEVICE = 0xd + SO_BINDTOIFINDEX = 0x41 + SO_BPF_EXTENSIONS = 0x32 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0x400 + SO_BUSY_POLL = 0x30 + SO_CNX_ADVICE = 0x37 + SO_COOKIE = 0x3b + SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x33 + SO_INCOMING_NAPI_ID = 0x3a + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x28 + SO_MARK = 0x22 + SO_MAX_PACING_RATE = 0x31 + SO_MEMINFO = 0x39 + SO_NOFCS = 0x27 + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x2 + SO_PASSSEC = 0x1f + SO_PEEK_OFF = 0x26 + SO_PEERCRED = 0x40 + SO_PEERGROUPS = 0x3d + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x100b + SO_RCVLOWAT = 0x800 + SO_RCVTIMEO = 0x2000 + SO_RCVTIMEO_NEW = 0x44 + SO_RCVTIMEO_OLD = 0x2000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x24 + SO_SECURITY_AUTHENTICATION = 0x5001 + SO_SECURITY_ENCRYPTION_NETWORK = 0x5004 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002 + SO_SELECT_ERR_QUEUE = 0x29 + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x100a + SO_SNDLOWAT = 0x1000 + SO_SNDTIMEO = 0x4000 + SO_SNDTIMEO_NEW = 0x45 + SO_SNDTIMEO_OLD = 0x4000 + SO_TIMESTAMPING = 0x23 + SO_TIMESTAMPING_NEW = 0x43 + SO_TIMESTAMPING_OLD = 0x23 + SO_TIMESTAMPNS = 0x21 + SO_TIMESTAMPNS_NEW = 0x42 + SO_TIMESTAMPNS_OLD = 0x21 + SO_TIMESTAMP_NEW = 0x46 + SO_TXTIME = 0x3f + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x25 + SO_ZEROCOPY = 0x3e + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x20005407 + TCGETA = 0x40125401 + TCGETS = 0x40245408 + TCGETS2 = 0x402c540c + TCSAFLUSH = 0x2 + TCSBRK = 0x20005405 + TCSBRKP = 0x5425 + TCSETA = 0x80125402 + TCSETAF = 0x80125404 + TCSETAW = 0x80125403 + TCSETS = 0x80245409 + TCSETS2 = 0x802c540d + TCSETSF = 0x8024540b + TCSETSF2 = 0x802c540f + TCSETSW = 0x8024540a + TCSETSW2 = 0x802c540e + TCXONC = 
0x20005406 + TFD_CLOEXEC = 0x400000 + TFD_NONBLOCK = 0x4000 + TIOCCBRK = 0x2000747a + TIOCCONS = 0x20007424 + TIOCEXCL = 0x2000740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x40047400 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x40285443 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x40047483 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40047486 + TIOCGPTPEER = 0x20007489 + TIOCGRS485 = 0x40205441 + TIOCGSERIAL = 0x541e + TIOCGSID = 0x40047485 + TIOCGSOFTCAR = 0x40047464 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMIWAIT = 0x545c + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007484 + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSETD = 0x80047401 + TIOCSIG = 0x80047488 + TIOCSISO7816 = 0xc0285444 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x80047482 + TIOCSPTLCK = 0x80047487 + TIOCSRS485 = 0xc0205442 + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x80047465 + TIOCSTART = 0x2000746e + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x20005437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + __TIOCFLUSH = 0x80047410 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x30) EADDRNOTAVAIL = syscall.Errno(0x31) EADV = 
syscall.Errno(0x53) EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x25) EBADE = syscall.Errno(0x66) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x5d) EBADMSG = syscall.Errno(0x4c) EBADR = syscall.Errno(0x67) EBADRQC = syscall.Errno(0x6a) EBADSLT = syscall.Errno(0x6b) EBFONT = syscall.Errno(0x6d) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7f) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x5e) ECOMM = syscall.Errno(0x55) ECONNABORTED = syscall.Errno(0x35) @@ -2742,23 +553,15 @@ const ( EDEADLK = syscall.Errno(0x4e) EDEADLOCK = syscall.Errno(0x6c) EDESTADDRREQ = syscall.Errno(0x27) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x58) EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x40) EHOSTUNREACH = syscall.Errno(0x41) EHWPOISON = syscall.Errno(0x87) EIDRM = syscall.Errno(0x4d) EILSEQ = syscall.Errno(0x7a) EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x81) EKEYREJECTED = syscall.Errno(0x83) @@ -2775,8 +578,6 @@ const ( ELNRNG = syscall.Errno(0x62) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x7e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x28) EMULTIHOP = syscall.Errno(0x57) ENAMETOOLONG = syscall.Errno(0x3f) @@ -2784,102 +585,70 @@ const ( ENETDOWN = syscall.Errno(0x32) ENETRESET = syscall.Errno(0x34) ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x69) ENOBUFS = syscall.Errno(0x37) ENOCSI = syscall.Errno(0x64) ENODATA = syscall.Errno(0x6f) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x80) ENOLCK = syscall.Errno(0x4f) ENOLINK = syscall.Errno(0x52) ENOMEDIUM = syscall.Errno(0x7d) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x4b) ENONET = syscall.Errno(0x50) ENOPKG = syscall.Errno(0x71) ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x4a) ENOSTR = syscall.Errno(0x48) ENOSYS = syscall.Errno(0x5a) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x85) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x73) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x5c) EOWNERDEAD = syscall.Errno(0x84) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) EPROCLIM = syscall.Errno(0x43) EPROTO = syscall.Errno(0x56) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x59) EREMOTE = syscall.Errno(0x47) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x74) ERFKILL = syscall.Errno(0x86) - EROFS = syscall.Errno(0x1e) ERREMOTE = syscall.Errno(0x51) ESHUTDOWN = syscall.Errno(0x3a) ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x54) ESTALE = syscall.Errno(0x46) ESTRPIPE = syscall.Errno(0x5b) ETIME = syscall.Errno(0x49) ETIMEDOUT = syscall.Errno(0x3c) ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = 
syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x63) EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x68) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x14) SIGCLD = syscall.Signal(0x14) SIGCONT = syscall.Signal(0x13) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) SIGLOST = syscall.Signal(0x1d) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x17) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1d) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x11) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x12) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 78cc04ea..96b9b8ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -3,7 +3,7 @@ // +build 386,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -1085,6 +1085,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 92185e69..ed522a84 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1075,6 +1075,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 373ad454..c8d36fe9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -3,7 +3,7 @@ // +build arm,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go package unix @@ -1065,6 +1065,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index fb6c6044..f1c146a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -3,7 +3,7 @@ // +build arm64,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go package unix @@ -1075,6 +1075,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index d8be0451..5402bd55 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -3,7 +3,7 @@ // +build 386,openbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -881,14 +881,15 @@ const ( MADV_SPACEAVAIL = 0x5 MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 - MAP_COPY = 0x4 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x1ff7 - MAP_HASSEMAPHORE = 0x200 - MAP_INHERIT = 0x80 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 - MAP_INHERIT_DONATE_COPY = 0x3 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 MAP_NOEXTEND = 0x100 @@ -896,7 +897,8 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_SHARED = 0x1 - MAP_TRYFIXED = 0x400 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -946,6 +948,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 1f9e8a29..ffaf2d2f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,openbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -920,10 +920,11 @@ const ( MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x7ff7 + MAP_FLAGMASK = 0xfff7 MAP_HASSEMAPHORE = 0x0 MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 @@ -990,6 +991,7 @@ const ( NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 79d5695c..7aa796a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,11 +1,11 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- _const.go - // +build arm,openbsd +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- _const.go + package unix import "syscall" @@ -881,10 +881,11 @@ const ( MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x3ff7 + MAP_FLAGMASK = 0xfff7 MAP_HASSEMAPHORE = 0x0 MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 @@ -896,6 +897,7 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x0 MAP_SHARED = 0x1 + MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 @@ -947,6 +949,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ec5f92de..1792d3f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -996,6 +996,7 @@ const ( NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 22569db3..46e054cc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -3,7 +3,7 @@ // +build amd64,solaris -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -666,6 +666,7 @@ const ( M_FLUSH = 0x86 NAME_MAX = 0xff NEWDEV = 0x1 + NFDBITS = 0x40 NL0 = 0x0 NL1 = 0x100 NLDLY = 0x100 diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace386_linux.go deleted file mode 100644 index 2d21c49e..00000000 --- a/vendor/golang.org/x/sys/unix/zptrace386_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. - -// +build linux -// +build 386 amd64 - -package unix - -import "unsafe" - -// PtraceRegs386 is the registers used by 386 binaries. -type PtraceRegs386 struct { - Ebx int32 - Ecx int32 - Edx int32 - Esi int32 - Edi int32 - Ebp int32 - Eax int32 - Xds int32 - Xes int32 - Xfs int32 - Xgs int32 - Orig_eax int32 - Eip int32 - Xcs int32 - Eflags int32 - Esp int32 - Xss int32 -} - -// PtraceGetRegs386 fetches the registers used by 386 binaries. -func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegs386 sets the registers used by 386 binaries. -func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} - -// PtraceRegsAmd64 is the registers used by amd64 binaries. -type PtraceRegsAmd64 struct { - R15 uint64 - R14 uint64 - R13 uint64 - R12 uint64 - Rbp uint64 - Rbx uint64 - R11 uint64 - R10 uint64 - R9 uint64 - R8 uint64 - Rax uint64 - Rcx uint64 - Rdx uint64 - Rsi uint64 - Rdi uint64 - Orig_rax uint64 - Rip uint64 - Cs uint64 - Eflags uint64 - Rsp uint64 - Ss uint64 - Fs_base uint64 - Gs_base uint64 - Ds uint64 - Es uint64 - Fs uint64 - Gs uint64 -} - -// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. -func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsAmd64 sets the registers used by amd64 binaries. 
-func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go new file mode 100644 index 00000000..89c5920e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -0,0 +1,41 @@ +// Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. + +// +build linux +// +build arm arm64 + +package unix + +import "unsafe" + +// PtraceRegsArm is the registers used by arm binaries. +type PtraceRegsArm struct { + Uregs [18]uint32 +} + +// PtraceGetRegsArm fetches the registers used by arm binaries. +func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm sets the registers used by arm binaries. +func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsArm64 is the registers used by arm64 binaries. +type PtraceRegsArm64 struct { + Regs [31]uint64 + Sp uint64 + Pc uint64 + Pstate uint64 +} + +// PtraceGetRegsArm64 fetches the registers used by arm64 binaries. +func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm64 sets the registers used by arm64 binaries. +func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go new file mode 100644 index 00000000..6cb6d688 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go @@ -0,0 +1,17 @@ +// Code generated by linux/mkall.go generatePtraceRegSet("arm64"). DO NOT EDIT. + +package unix + +import "unsafe" + +// PtraceGetRegSetArm64 fetches the registers used by arm64 binaries. +func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error { + iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))} + return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) +} + +// PtraceSetRegSetArm64 sets the registers used by arm64 binaries. +func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error { + iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))} + return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go new file mode 100644 index 00000000..24b841ee --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. + +// +build linux +// +build mips mips64 + +package unix + +import "unsafe" + +// PtraceRegsMips is the registers used by mips binaries. +type PtraceRegsMips struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips fetches the registers used by mips binaries. +func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips sets the registers used by mips binaries. 
+func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64 is the registers used by mips64 binaries. +type PtraceRegsMips64 struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64 fetches the registers used by mips64 binaries. +func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64 sets the registers used by mips64 binaries. +func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go new file mode 100644 index 00000000..47b04895 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. + +// +build linux +// +build mipsle mips64le + +package unix + +import "unsafe" + +// PtraceRegsMipsle is the registers used by mipsle binaries. +type PtraceRegsMipsle struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMipsle fetches the registers used by mipsle binaries. +func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMipsle sets the registers used by mipsle binaries. +func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64le is the registers used by mips64le binaries. +type PtraceRegsMips64le struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64le fetches the registers used by mips64le binaries. +func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64le sets the registers used by mips64le binaries. +func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go new file mode 100644 index 00000000..ea5d9cb5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -0,0 +1,80 @@ +// Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. + +// +build linux +// +build 386 amd64 + +package unix + +import "unsafe" + +// PtraceRegs386 is the registers used by 386 binaries. +type PtraceRegs386 struct { + Ebx int32 + Ecx int32 + Edx int32 + Esi int32 + Edi int32 + Ebp int32 + Eax int32 + Xds int32 + Xes int32 + Xfs int32 + Xgs int32 + Orig_eax int32 + Eip int32 + Xcs int32 + Eflags int32 + Esp int32 + Xss int32 +} + +// PtraceGetRegs386 fetches the registers used by 386 binaries. +func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegs386 sets the registers used by 386 binaries. 
+func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsAmd64 is the registers used by amd64 binaries. +type PtraceRegsAmd64 struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. +func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsAmd64 sets the registers used by amd64 binaries. +func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptracearm_linux.go deleted file mode 100644 index faf23bbe..00000000 --- a/vendor/golang.org/x/sys/unix/zptracearm_linux.go +++ /dev/null @@ -1,41 +0,0 @@ -// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. - -// +build linux -// +build arm arm64 - -package unix - -import "unsafe" - -// PtraceRegsArm is the registers used by arm binaries. -type PtraceRegsArm struct { - Uregs [18]uint32 -} - -// PtraceGetRegsArm fetches the registers used by arm binaries. -func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsArm sets the registers used by arm binaries. -func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} - -// PtraceRegsArm64 is the registers used by arm64 binaries. -type PtraceRegsArm64 struct { - Regs [31]uint64 - Sp uint64 - Pc uint64 - Pstate uint64 -} - -// PtraceGetRegsArm64 fetches the registers used by arm64 binaries. -func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsArm64 sets the registers used by arm64 binaries. -func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptracemips_linux.go deleted file mode 100644 index c431131e..00000000 --- a/vendor/golang.org/x/sys/unix/zptracemips_linux.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. - -// +build linux -// +build mips mips64 - -package unix - -import "unsafe" - -// PtraceRegsMips is the registers used by mips binaries. -type PtraceRegsMips struct { - Regs [32]uint64 - Lo uint64 - Hi uint64 - Epc uint64 - Badvaddr uint64 - Status uint64 - Cause uint64 -} - -// PtraceGetRegsMips fetches the registers used by mips binaries. -func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsMips sets the registers used by mips binaries. 
-func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} - -// PtraceRegsMips64 is the registers used by mips64 binaries. -type PtraceRegsMips64 struct { - Regs [32]uint64 - Lo uint64 - Hi uint64 - Epc uint64 - Badvaddr uint64 - Status uint64 - Cause uint64 -} - -// PtraceGetRegsMips64 fetches the registers used by mips64 binaries. -func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsMips64 sets the registers used by mips64 binaries. -func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go deleted file mode 100644 index dc3d6d37..00000000 --- a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. - -// +build linux -// +build mipsle mips64le - -package unix - -import "unsafe" - -// PtraceRegsMipsle is the registers used by mipsle binaries. -type PtraceRegsMipsle struct { - Regs [32]uint64 - Lo uint64 - Hi uint64 - Epc uint64 - Badvaddr uint64 - Status uint64 - Cause uint64 -} - -// PtraceGetRegsMipsle fetches the registers used by mipsle binaries. -func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsMipsle sets the registers used by mipsle binaries. -func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} - -// PtraceRegsMips64le is the registers used by mips64le binaries. -type PtraceRegsMips64le struct { - Regs [32]uint64 - Lo uint64 - Hi uint64 - Epc uint64 - Badvaddr uint64 - Status uint64 - Cause uint64 -} - -// PtraceGetRegsMips64le fetches the registers used by mips64le binaries. -func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) -} - -// PtraceSetRegsMips64le sets the registers used by mips64le binaries. -func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c4ec7ff8..23e94d36 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.1_11.go syscall_darwin_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,386,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,16 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -553,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -573,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), 
uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -976,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1352,8 +1352,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,10 +1692,25 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1738,23 +1754,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go new file mode 100644 index 00000000..e263fbdb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -l32 -tags darwin,386,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build darwin,386,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s new file mode 100644 index 00000000..00da1ebf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go 386 +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 23346dc6..e2ffb3be 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -360,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), 
uintptr(timeout)) n = int(r0) @@ -527,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -763,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -793,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -943,6 +928,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1376,6 +1376,21 @@ func 
libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -1872,8 +1887,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2341,20 +2357,18 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_gettimeofday_trampoline() +func libc_ptrace_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2408,28 +2422,6 @@ func libc_fstatfs64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___getdirentries64_trampoline() - -//go:linkname libc___getdirentries64 libc___getdirentries64 -//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 37b85b4f..6836a412 100644 
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -40,14 +40,10 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -88,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 @@ -108,6 +106,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +264,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 @@ -272,8 +274,6 @@ TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 - JMP libc___getdirentries64(SB) TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index 2581e896..10256173 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.1_11.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,amd64,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,16 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -553,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -573,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), 
uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -976,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1352,8 +1352,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,10 +1692,25 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1738,23 +1754,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go new file mode 100644 index 00000000..314042a9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build darwin,amd64,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s new file mode 100644 index 00000000..d671e831 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go amd64 +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c142e33e..c67e336e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -360,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), 
uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -763,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -793,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -1391,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -1887,8 +1887,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2356,20 +2357,18 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -func libc_gettimeofday_trampoline() +func libc_ptrace_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2423,28 +2422,6 @@ func libc_fstatfs64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___getdirentries64_trampoline() - -//go:linkname libc___getdirentries64 libc___getdirentries64 -//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 1a391519..a3fdf099 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -40,14 +40,10 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT 
·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -88,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 @@ -266,6 +264,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 @@ -274,8 +274,6 @@ TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 - JMP libc___getdirentries64(SB) TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index f8caecef..d34e6df2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.1_11.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,arm,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,16 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -553,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -573,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), 
uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -976,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1352,8 +1352,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go new file mode 100644 index 00000000..f519ce9a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -l32 -tags darwin,arm,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build darwin,arm,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s new file mode 100644 index 00000000..488e5570 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go arm +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 01cffbf4..b759757a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -360,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), 
uintptr(timeout)) n = int(r0) @@ -527,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -763,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -793,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -943,6 +928,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1376,6 +1376,21 @@ func 
libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -1872,8 +1887,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2341,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 994056f3..b67f518f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -40,14 +40,10 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -88,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT 
·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 @@ -108,6 +106,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 3fd0f3c8..8d39a09f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.1_11.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,arm64,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,16 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -553,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -573,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -976,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1352,8 +1352,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go new file mode 100644 index 00000000..d64e6c80 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go +// Code generated by the 
command above; see README.md. DO NOT EDIT. + +// +build darwin,arm64,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s new file mode 100644 index 00000000..b29dabb0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go arm64 +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 8f2691de..b2886126 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -360,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := 
syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,21 +490,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -763,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -793,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -943,6 +928,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 
0, 0) if e1 != 0 { @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -1872,8 +1887,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2341,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 61dc0d4c..40cce1bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -40,14 +40,10 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +60,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -88,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT 
·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 @@ -108,6 +106,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index cdfe9318..fe1fdd78 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -255,17 +255,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -1272,8 +1261,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index a783306b..600f1d26 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -387,8 +360,15 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -397,15 +377,24 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -414,8 +403,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1363,7 +1352,7 @@ func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1606,8 +1595,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, 
uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index f995520d..064934b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -387,8 +360,15 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -397,15 +377,24 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -414,8 +403,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1606,8 +1595,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d681acd4..31d2c461 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,8 +350,14 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -387,8 +366,8 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -424,6 +403,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1606,8 +1595,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 5049b2ed..4adaaa56 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags freebsd,arm64 -- syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go +// go run mksyscall.go -tags freebsd,arm64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build freebsd,arm64 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -404,8 +377,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } @@ -414,8 +387,24 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1606,8 +1595,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go new file mode 100644 index 00000000..92efa1da --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -0,0 +1,87 @@ +// go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build illumos,amd64 + +package unix + +import ( + "unsafe" +) + +//go:cgo_import_dynamic libc_readv readv "libc.so" +//go:cgo_import_dynamic libc_preadv preadv "libc.so" +//go:cgo_import_dynamic libc_writev writev "libc.so" +//go:cgo_import_dynamic libc_pwritev pwritev "libc.so" + +//go:linkname procreadv libc_readv +//go:linkname procpreadv libc_preadv +//go:linkname procwritev libc_writev +//go:linkname procpwritev libc_pwritev + +var ( + procreadv, + procpreadv, + procwritev, + procpwritev syscallFunc +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, off int64) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go new file mode 100644 index 00000000..df217825 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -0,0 +1,1856 @@ +// Code generated by mkmerge.go; DO NOT EDIT. 
+ +// +build linux + +package unix + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := RawSyscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := RawSyscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { + _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, 
uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } 
else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) 
(sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + 
r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 
0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdCreate(clockid int, flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_TIMERFD_CREATE, uintptr(clockid), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdGettime(fd int, currValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall(SYS_TIMERFD_GETTIME, uintptr(fd), uintptr(unsafe.Pointer(currValue)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall6(SYS_TIMERFD_SETTIME, uintptr(fd), uintptr(flags), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, 
offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c5e46e4c..19ebd3ff 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), 
uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, 
description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) 
- success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
[zsyscall_linux_386.go — remaining deletions of generated syscall wrappers elided: MemfdCreate, Mkdirat, Mknodat, Nanosleep, PerfEventOpen, PivotRoot, prlimit, Prctl, Pselect, read, Removexattr, Renameat2, RequestKey, Setdomainname, Sethostname, Setpgid, Setsid, Settimeofday, Setns, Setpriority, Setxattr, signalfd, Statx, Sync]
-func Syncfs(fd int) (err error) {
-	_, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0)
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+	_, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Sysinfo(info *Sysinfo_t) (err error) {
-	_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+	_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1465,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
[further deletions of generated wrappers elided: Tgkill, Times, Umask, Uname, Unmount, Unshare, write, exitThread, readlen, writelen, munmap, Madvise, Mprotect, Mlock, Mlockall, Msync, Munlock, Munlockall, faccessat, nameToHandleAt, openByHandleAt]
 func pipe(p *[2]_C_int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
 	if e1 != 0 {
@@ -1739,17 +55,7 @@ func pipe(p *[2]_C_int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func pipe2(p *[2]_C_int, flags int) (err error) {
-	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
 	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2000,8 +306,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setfsgid(gid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0)
+func setfsgid(gid int) (prev int, err error) {
+	r0, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0)
+	prev = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -2010,8 +317,9 @@ func Setfsgid(gid int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setfsuid(uid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0)
+func setfsuid(uid int) (prev int, err error) {
+	r0, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0)
+	prev = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index da8819e4..5c562182 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -14,1428 +14,8 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
[zsyscall_linux_amd64.go — deletions of generated syscall wrappers elided: FanotifyInit, fanotifyMark, fchmodat, ioctl, Linkat, openat, ppoll, Readlinkat, Symlinkat, Unlinkat, utimensat, Getcwd, wait4, KeyctlInt, KeyctlBuffer, keyctlJoin, keyctlSearch, keyctlIOV, keyctlDH, ptrace, reboot, mount, Acct, AddKey, Adjtimex, Capget, Capset, Chdir, Chroot, ClockGetres, ClockGettime, ClockNanosleep, Close, CopyFileRange, DeleteModule, Dup, Dup3, EpollCreate1, EpollCtl, Eventfd, Exit, Fallocate, Fchdir, Fchmod, Fchownat, fcntl, Fdatasync, Fgetxattr, FinitModule, Flistxattr, Flock, Fremovexattr, Fsetxattr, Fsync, Getdents, Getpgid, Getpid, Getppid, Getpriority, Getrandom, Getrusage, Getsid, Gettid, Getxattr, InitModule, InotifyAddWatch, InotifyInit1, InotifyRmWatch, Kill, Klogctl, Lgetxattr, Listxattr, Llistxattr, Lremovexattr, Lsetxattr, MemfdCreate, Mkdirat, Mknodat, Nanosleep, PerfEventOpen, PivotRoot, prlimit, Prctl, Pselect, read, Removexattr, Renameat2, RequestKey, Setdomainname, Sethostname, Setpgid, Setsid, Settimeofday, Setns, Setpriority, Setxattr, signalfd, Statx, Sync]
-func Syncfs(fd int) (err error) {
-	_, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0)
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+	_, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Sysinfo(info *Sysinfo_t) (err error) {
-	_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+	_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
[further deletions of generated wrappers elided: Tgkill, Times, Umask, Uname, Unmount, Unshare, write, exitThread, readlen, writelen, munmap, Madvise, Mprotect, Mlock, Mlockall, Msync, Munlock, Munlockall, faccessat, nameToHandleAt, openByHandleAt]
-func Dup2(oldfd int, newfd int) (err error) {
+func dup2(oldfd int, newfd int) (err error) {
 	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2016,8 +332,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setfsgid(gid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+func setfsgid(gid int) (prev int, err error) {
+	r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+	prev = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -2026,8 +343,9 @@ func Setfsgid(gid int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setfsuid(uid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+func setfsuid(uid int) (prev int, err error) {
+	r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+	prev = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -2402,16 +720,6 @@ func pipe(p *[2]_C_int) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func pipe2(p *[2]_C_int, flags int) (err error) {
-	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
 	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
 	n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 6ad9be6d..dc69d99c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -14,1428 +14,8 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
[zsyscall_linux_arm.go — deletions of generated syscall wrappers elided: FanotifyInit, fanotifyMark, fchmodat, ioctl, Linkat, openat, ppoll, Readlinkat, Symlinkat, Unlinkat, utimensat, Getcwd, wait4, KeyctlInt, KeyctlBuffer, keyctlJoin, keyctlSearch, keyctlIOV, keyctlDH, ptrace, reboot, mount, Acct, AddKey, Adjtimex, Capget, Capset ...]
!= 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), 
uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - 
var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1465,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, 
mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1739,16 +55,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -1928,7 +234,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2136,8 +442,9 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2146,8 +453,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index f8833178..1b897dee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - 
_p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := 
Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np 
int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1835,7 +151,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { +func getrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) @@ -1939,8 +255,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1949,8 +266,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1989,7 +307,7 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { +func setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) @@ -2270,16 +588,6 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(cmdline) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 8eebc6c7..49186843 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if 
err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err 
!= nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - 
if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, 
uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1930,8 +246,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1940,8 +257,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2387,16 +705,6 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (p1 int, p2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) p1 = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ecf62a67..9171d3bd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, 
newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, 
options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) 
(err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 
:= Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr 
string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 
0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 
0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, 
len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) 
- } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1960,8 +276,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := 
Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1970,8 +287,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2346,16 +664,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func fstat(fd int, st *stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1ba0f7b6..82286f04 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 
int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } 
- return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { 
- r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := 
RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, 
e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - 
_p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 
0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 
0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1960,8 +276,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1970,8 +287,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2346,16 +664,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
-func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func fstat(fd int, st *stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 20012b2f..15920621 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil 
{ - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = 
unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = 
unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { 
- err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err 
!= nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) 
{ - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, 
err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1930,8 +246,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1940,8 +257,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2387,16 +705,6 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (p1 int, p2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) p1 = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 2b520dea..73a42e2c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), 
uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = 
unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), 
uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, 
uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - 
if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = 
unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), 
uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, 
uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2042,8 +358,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2052,8 +369,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2454,16 +772,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index d9f044c9..6b855953 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - 
if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err 
= errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, 
_, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - 
} - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), 
uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} 
- -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), 
uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh 
*fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2042,8 +358,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2052,8 +369,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2454,16 +772,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9feed65e..b7613344 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath 
string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage 
*Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, 
_, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := 
Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) 
(err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if 
e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len 
int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } 
- return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1919,8 +235,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) 
{ + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1929,8 +246,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2250,16 +568,6 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(cmdline) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 0a651508..d7032ab1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) 
- } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := 
Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 
{ - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2012,8 +328,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2022,8 +339,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2234,16 +552,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, 
uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index e27f6693..bcbbdd90 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -14,1428 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { 
- _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := 
Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, 
uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) 
(n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode 
uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = 
BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1444,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1465,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := 
Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 
unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1756,7 +72,7 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2011,8 +327,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2021,8 +338,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2412,16 +730,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 7e058266..3bbd9e39 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1498,8 +1507,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
-func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1634,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d94d076a..d8cf5012 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err 
error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1498,8 +1507,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1634,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index cf5bf3d0..1153fe69 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1498,8 +1507,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1634,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 
*byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 243a9317..24b4ebb4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1498,8 +1507,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1634,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a9532d07..b44b31ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,8 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 
unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -1304,8 +1303,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0cb9f017..67f93ee7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,8 +350,8 @@ func Munlockall() (err 
error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -1304,8 +1303,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 6fc99b54..d7c878b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, 
timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,8 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -1304,8 +1303,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 27878a72..8facd695 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen 
uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -377,8 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -1304,8 +1303,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 5f614760..a96165d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1478,8 +1478,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = e1 } diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 37dcc74c..102f1ab4 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,4 +1,4 @@ -// mksysctl_openbsd.pl +// go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. // +build 386,openbsd @@ -30,6 +30,7 @@ var sysctlMib = []mibentry{ {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.physmem", []_C_int{6, 19}}, {"hw.product", []_C_int{6, 15}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index fe6caa6e..4866fced 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -31,6 +31,7 @@ var sysctlMib = []mibentry{ {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 6eb8c0b0..d3801eb2 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -30,6 +30,7 @@ var sysctlMib = []mibentry{ {"hw.model", []_C_int{6, 2}}, {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, {"hw.physmem", []_C_int{6, 19}}, {"hw.product", []_C_int{6, 15}}, diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index e869c060..54559a89 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -429,4 +429,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 4917b8ab..054a741b 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -351,4 +351,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index f85fcb4f..307f2ba1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -393,4 +393,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 678a119b..e9404dd5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -296,4 +296,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 222c9f9a..68bb6d29 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -414,4 +414,8 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 + SYS_CLONE3 = 4435 + SYS_OPENAT2 = 4437 + SYS_PIDFD_GETFD = 4438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 28e6d0e9..4e525118 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -344,4 +344,8 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index e643c6f6..4d9aa300 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -344,4 +344,8 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 01d93c42..64af0707 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -414,4 +414,8 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 + SYS_CLONE3 = 4435 + SYS_OPENAT2 = 4437 + SYS_PIDFD_GETFD = 4438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5744149e..cc3c067b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -393,4 +393,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 21c83204..4050ff98 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -393,4 +393,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index c1bb6d8f..529abb6a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -295,4 +295,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index bc3cc6b5..27665001 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -358,4 +358,8 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 0a2841ba..4dc82bb2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -373,4 +373,7 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index c206f2b0..71ea1d6d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -467,3 +467,13 @@ type Utsname struct { Version [32]byte Machine [32]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 7312e95f..2a3ec615 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -128,9 +128,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -153,9 +153,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { @@ -375,15 +375,15 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + _ [32]byte } type Sigset_t struct { @@ -423,7 +423,7 @@ type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint32 } type Kevent_t struct { @@ -458,7 +458,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -469,7 +469,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte 
Data IfData } @@ -536,7 +535,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -547,7 +546,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -564,7 +563,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 @@ -698,3 +697,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 29ba2f5b..e11e9549 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -123,9 +123,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -148,9 +148,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { @@ -275,10 +275,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -428,7 +426,7 @@ type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint64 } type Kevent_t struct { @@ -463,7 +461,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -474,7 +472,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -541,7 +538,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -552,7 +549,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -569,7 +566,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 @@ -623,7 +620,6 @@ type BpfZbuf struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -704,3 +700,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index b4090ef3..6f79227d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -405,7 +405,7 @@ type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint32 } type Kevent_t struct { @@ -681,3 +681,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 1542a877..c6fe1d09 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs 
types_freebsd.go | go run mkpost.go +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build arm64,freebsd @@ -123,9 +123,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -148,9 +148,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { @@ -275,10 +275,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -326,11 +324,9 @@ const ( PTRACE_CONT = 0x7 PTRACE_DETACH = 0xb PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 PTRACE_GETLWPLIST = 0xf PTRACE_GETNUMLWPS = 0xe PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 PTRACE_IO = 0xc PTRACE_KILL = 0x8 PTRACE_LWPEVENTS = 0x18 @@ -373,15 +369,15 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [8]byte - X_reason [40]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte } type Sigset_t struct { @@ -394,19 +390,21 @@ type Reg struct { Sp uint64 Elr uint64 Spsr uint32 + _ [4]byte } type FpReg struct { - Fp_q [32]uint128 - Fp_sr uint32 - Fp_cr uint32 + Q [32][16]uint8 + Sr uint32 + Cr uint32 + _ [8]byte } type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint64 } type Kevent_t struct { @@ -441,7 +439,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -452,7 +450,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -519,7 +516,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -530,7 +527,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -547,7 +544,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 @@ -601,7 +598,6 @@ type BpfZbuf struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -682,3 +678,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go new file mode 100644 index 00000000..416f7767 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -0,0 +1,2345 @@ +// Code generated by mkmerge.go; DO NOT EDIT. 
+ +// +build linux + +package unix + +const ( + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + + _C_long_long int64 +) + +type ItimerSpec struct { + Interval Timespec + Value Timespec +} + +const ( + TIME_OK = 0x0 + TIME_INS = 0x1 + TIME_DEL = 0x2 + TIME_OOP = 0x3 + TIME_WAIT = 0x4 + TIME_ERROR = 0x5 + TIME_BAD = 0x5 +) + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Fsid struct { + Val [2]int32 +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type FscryptPolicyV1 struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptPolicyV2 struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + _ [4]uint8 + Master_key_identifier [16]uint8 +} + +type FscryptGetPolicyExArg struct { + Size uint64 + Policy [24]byte +} + +type FscryptKeySpecifier struct { + Type uint32 + _ uint32 + U [32]byte +} + +type FscryptAddKeyArg struct { + Key_spec FscryptKeySpecifier + Raw_size uint32 + Key_id uint32 + _ [8]uint32 +} + +type FscryptRemoveKeyArg struct { + Key_spec FscryptKeySpecifier + Removal_status_flags uint32 + _ [5]uint32 +} + +type FscryptGetKeyStatusArg struct { + Key_spec FscryptKeySpecifier + _ [6]uint32 + Status uint32 + Status_flags uint32 + User_count uint32 + _ [13]uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + Ifindex int32 + Addr [16]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 
+ Queue_id uint32 + Shared_umem_fd uint32 +} + +type RawSockaddrPPPoX [0x1e]byte + +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + +type RawSockaddrL2TPIP struct { + Family uint16 + Unused uint16 + Addr [4]byte /* in_addr */ + Conn_id uint32 + _ [4]uint8 +} + +type RawSockaddrL2TPIP6 struct { + Family uint16 + Unused uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + Conn_id uint32 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +type CanFilter struct { + Id uint32 + Mask uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa + SizeofSockaddrCAN = 0x18 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 + SizeofSockaddrL2TPIP = 0x10 + SizeofSockaddrL2TPIP6 = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 +) + +const ( + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 
+ IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x36 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +type NdUseroptmsg struct { + Family uint8 
+ Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + +const ( + SizeofSockFilter = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type SignalfdSiginfo struct { + Signo uint32 + Errno int32 + Code int32 + Pid uint32 + Uid uint32 + Fd int32 + Tid uint32 + Band uint32 + Overrun uint32 + Trapno uint32 + Status int32 + Int int32 + Ptr uint64 + Utime uint64 + Stime uint64 + Addr uint64 + Addr_lsb uint16 + _ uint16 + Syscall int32 + Call_addr uint64 + Arch uint32 + _ [28]uint8 +} + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +const ( + _CPU_SETSIZE = 0x400 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 
uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 + _ uint16 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + 
PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 +) + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + 
Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + 
NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 
0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 
0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + Time RTCTime +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + Data *byte +} + +const ( + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 + Flags uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 
0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +type ScmTimestamping struct { + Ts [3]Timespec +} + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff + + SCM_TSTAMP_SND = 0x0 + SCM_TSTAMP_SCHED = 0x1 + SCM_TSTAMP_ACK = 0x2 +) + +type SockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_FREEZE = 0x16 + BPF_BTF_GET_NEXT_ID = 0x17 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + 
BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_MAP_TYPE_SK_STORAGE = 0x18 + BPF_MAP_TYPE_DEVMAP_HASH = 0x19 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18 + BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19 + BPF_PROG_TYPE_TRACING = 0x1a + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_CGROUP_SYSCTL = 0x12 + BPF_CGROUP_UDP4_RECVMSG = 0x13 + BPF_CGROUP_UDP6_RECVMSG = 0x14 + BPF_CGROUP_GETSOCKOPT = 0x15 + BPF_CGROUP_SETSOCKOPT = 0x16 + BPF_TRACE_RAW_TP = 0x17 + BPF_TRACE_FENTRY = 0x18 + BPF_TRACE_FEXIT = 0x19 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_ADJ_ROOM_MAC = 0x1 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_LWT_ENCAP_IP = 0x2 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_LWT_REROUTE = 0x80 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_SOCK_OPS_RTT_CB = 0xc + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + 
BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) + +const ( + DEVLINK_CMD_UNSPEC = 0x0 + DEVLINK_CMD_GET = 0x1 + DEVLINK_CMD_SET = 0x2 + DEVLINK_CMD_NEW = 0x3 + DEVLINK_CMD_DEL = 0x4 + DEVLINK_CMD_PORT_GET = 0x5 + DEVLINK_CMD_PORT_SET = 0x6 + DEVLINK_CMD_PORT_NEW = 0x7 + DEVLINK_CMD_PORT_DEL = 0x8 + DEVLINK_CMD_PORT_SPLIT = 0x9 + DEVLINK_CMD_PORT_UNSPLIT = 0xa + DEVLINK_CMD_SB_GET = 0xb + DEVLINK_CMD_SB_SET = 0xc + DEVLINK_CMD_SB_NEW = 0xd + DEVLINK_CMD_SB_DEL = 0xe + DEVLINK_CMD_SB_POOL_GET = 0xf + DEVLINK_CMD_SB_POOL_SET = 0x10 + DEVLINK_CMD_SB_POOL_NEW = 0x11 + DEVLINK_CMD_SB_POOL_DEL = 0x12 + DEVLINK_CMD_SB_PORT_POOL_GET = 0x13 + DEVLINK_CMD_SB_PORT_POOL_SET = 0x14 + DEVLINK_CMD_SB_PORT_POOL_NEW = 0x15 + DEVLINK_CMD_SB_PORT_POOL_DEL = 0x16 + DEVLINK_CMD_SB_TC_POOL_BIND_GET = 0x17 + DEVLINK_CMD_SB_TC_POOL_BIND_SET = 0x18 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 0x19 + DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 0x1a + DEVLINK_CMD_SB_OCC_SNAPSHOT = 0x1b + DEVLINK_CMD_SB_OCC_MAX_CLEAR = 0x1c + DEVLINK_CMD_ESWITCH_GET = 0x1d + 
DEVLINK_CMD_ESWITCH_SET = 0x1e + DEVLINK_CMD_DPIPE_TABLE_GET = 0x1f + DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20 + DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21 + DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22 + DEVLINK_CMD_MAX = 0x44 + DEVLINK_PORT_TYPE_NOTSET = 0x0 + DEVLINK_PORT_TYPE_AUTO = 0x1 + DEVLINK_PORT_TYPE_ETH = 0x2 + DEVLINK_PORT_TYPE_IB = 0x3 + DEVLINK_SB_POOL_TYPE_INGRESS = 0x0 + DEVLINK_SB_POOL_TYPE_EGRESS = 0x1 + DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0x0 + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 0x1 + DEVLINK_ESWITCH_MODE_LEGACY = 0x0 + DEVLINK_ESWITCH_MODE_SWITCHDEV = 0x1 + DEVLINK_ESWITCH_INLINE_MODE_NONE = 0x0 + DEVLINK_ESWITCH_INLINE_MODE_LINK = 0x1 + DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 0x2 + DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 0x3 + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0x0 + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 0x1 + DEVLINK_ATTR_UNSPEC = 0x0 + DEVLINK_ATTR_BUS_NAME = 0x1 + DEVLINK_ATTR_DEV_NAME = 0x2 + DEVLINK_ATTR_PORT_INDEX = 0x3 + DEVLINK_ATTR_PORT_TYPE = 0x4 + DEVLINK_ATTR_PORT_DESIRED_TYPE = 0x5 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 0x6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 0x7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 0x8 + DEVLINK_ATTR_PORT_SPLIT_COUNT = 0x9 + DEVLINK_ATTR_PORT_SPLIT_GROUP = 0xa + DEVLINK_ATTR_SB_INDEX = 0xb + DEVLINK_ATTR_SB_SIZE = 0xc + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 0xd + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 0xe + DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 0xf + DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 0x10 + DEVLINK_ATTR_SB_POOL_INDEX = 0x11 + DEVLINK_ATTR_SB_POOL_TYPE = 0x12 + DEVLINK_ATTR_SB_POOL_SIZE = 0x13 + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 0x14 + DEVLINK_ATTR_SB_THRESHOLD = 0x15 + DEVLINK_ATTR_SB_TC_INDEX = 0x16 + DEVLINK_ATTR_SB_OCC_CUR = 0x17 + DEVLINK_ATTR_SB_OCC_MAX = 0x18 + DEVLINK_ATTR_ESWITCH_MODE = 0x19 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 0x1a + DEVLINK_ATTR_DPIPE_TABLES = 0x1b + DEVLINK_ATTR_DPIPE_TABLE = 0x1c + DEVLINK_ATTR_DPIPE_TABLE_NAME = 0x1d + DEVLINK_ATTR_DPIPE_TABLE_SIZE = 0x1e + DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 0x1f + DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 0x20 + DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 0x21 + DEVLINK_ATTR_DPIPE_ENTRIES = 0x22 + DEVLINK_ATTR_DPIPE_ENTRY = 0x23 + DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 0x24 + DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 0x25 + DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 0x26 + DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 0x27 + DEVLINK_ATTR_DPIPE_MATCH = 0x28 + DEVLINK_ATTR_DPIPE_MATCH_VALUE = 0x29 + DEVLINK_ATTR_DPIPE_MATCH_TYPE = 0x2a + DEVLINK_ATTR_DPIPE_ACTION = 0x2b + DEVLINK_ATTR_DPIPE_ACTION_VALUE = 0x2c + DEVLINK_ATTR_DPIPE_ACTION_TYPE = 0x2d + DEVLINK_ATTR_DPIPE_VALUE = 0x2e + DEVLINK_ATTR_DPIPE_VALUE_MASK = 0x2f + DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 0x30 + DEVLINK_ATTR_DPIPE_HEADERS = 0x31 + DEVLINK_ATTR_DPIPE_HEADER = 0x32 + DEVLINK_ATTR_DPIPE_HEADER_NAME = 0x33 + DEVLINK_ATTR_DPIPE_HEADER_ID = 0x34 + DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 0x35 + DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 0x36 + DEVLINK_ATTR_DPIPE_HEADER_INDEX = 0x37 + DEVLINK_ATTR_DPIPE_FIELD = 0x38 + DEVLINK_ATTR_DPIPE_FIELD_NAME = 0x39 + DEVLINK_ATTR_DPIPE_FIELD_ID = 0x3a + DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 0x3b + DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c + DEVLINK_ATTR_PAD = 0x3d + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e + DEVLINK_ATTR_MAX = 0x8c + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 + DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 + DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0x0 + DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0x0 + DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0x0 + DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0x0 + 
DEVLINK_DPIPE_HEADER_ETHERNET = 0x0 + DEVLINK_DPIPE_HEADER_IPV4 = 0x1 + DEVLINK_DPIPE_HEADER_IPV6 = 0x2 +) + +type FsverityDigest struct { + Algorithm uint16 + Size uint16 +} + +type FsverityEnableArg struct { + Version uint32 + Hash_algorithm uint32 + Block_size uint32 + Salt_size uint32 + Salt_ptr uint64 + Sig_size uint32 + _ uint32 + Sig_ptr uint64 + _ [11]uint64 +} + +type Nhmsg struct { + Family uint8 + Scope uint8 + Protocol uint8 + Resvd uint8 + Flags uint32 +} + +type NexthopGrp struct { + Id uint32 + Weight uint8 + Resvd1 uint8 + Resvd2 uint16 +} + +const ( + NHA_UNSPEC = 0x0 + NHA_ID = 0x1 + NHA_GROUP = 0x2 + NHA_GROUP_TYPE = 0x3 + NHA_BLACKHOLE = 0x4 + NHA_OIF = 0x5 + NHA_GATEWAY = 0x6 + NHA_ENCAP_TYPE = 0x7 + NHA_ENCAP = 0x8 + NHA_GROUPS = 0x9 + NHA_MASTER = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 50bc4128..761b67c8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 _ uint16 @@ -114,36 +100,6 @@ type Stat_t struct { Ino uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [1]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -165,126 +117,11 @@ type Flock_t struct { Pid int32 } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ 
[1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -295,41 +132,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -346,390 +153,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - 
NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc -) - -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type 
NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Ebx int32 Ecx int32 @@ -771,15 +204,6 @@ type Sysinfo_t struct { _ [8]int8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -794,35 +218,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -831,33 +227,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -869,13 +238,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -925,279 +287,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - 
Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - 
PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1273,22 +369,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1296,88 +376,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - 
Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1393,18 +391,6 @@ type Statfs_t struct { Spare [4]int32 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1415,589 +401,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 
-) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - 
NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - 
NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - 
NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x18 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2008,13 +415,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2024,168 +424,17 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - 
CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2316,182 +565,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - 
BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint16 @@ -2506,18 +579,21 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]int8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 055eaa76..201fb348 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type 
Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -113,36 +99,6 @@ type Stat_t struct { _ [3]int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -152,10 +108,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -165,126 +117,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -295,41 +132,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -347,390 +154,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* 
in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 
0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { R15 uint64 R14 uint64 @@ -783,15 +216,6 @@ type 
Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -807,35 +231,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -844,33 +240,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -882,13 +251,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -936,279 +298,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - 
Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - 
PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1284,22 +380,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1307,88 +387,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1404,18 +402,6 @@ type Statfs_t struct { 
Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1427,589 +413,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - 
NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - 
NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - 
NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x20 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2020,13 +427,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2037,168 +437,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x1269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 
Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2329,182 +579,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD 
= 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd 
- BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint64 @@ -2520,18 +594,21 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 66019c9c..8051b561 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 _ uint16 @@ -116,36 +102,6 @@ type Stat_t struct { Ino uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -155,10 +111,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -169,126 +121,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + 
FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -299,41 +136,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -350,390 +157,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - 
SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE 
= 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc -) - -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Uregs [18]uint32 } @@ -759,15 +192,6 @@ type Sysinfo_t struct { _ [8]uint8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -783,35 +207,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -820,33 +216,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -858,13 +227,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - 
Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -914,279 +276,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = 
CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT 
= 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1262,22 +358,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1285,88 +365,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1383,18 +381,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1405,589 +391,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word 
uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - 
NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 
0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - 
NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x18 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -1998,13 +405,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2015,168 +415,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - 
SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2307,182 +557,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - 
BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint16 @@ -2497,18 +571,21 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]uint8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + 
Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3104798c..a936f216 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -114,36 +100,6 @@ type Stat_t struct { _ [2]int32 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,126 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - 
type RawSockaddr struct { Family uint16 Data [14]int8 @@ -296,41 +133,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -348,390 +155,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - 
IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - 
-type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 + SizeofSockFprog = 0x10 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [31]uint64 Sp uint64 @@ -761,15 +194,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -786,35 +210,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -823,33 +219,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -861,13 +230,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -915,279 +277,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - 
CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - 
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1263,22 +359,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1286,88 +366,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - 
Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1383,18 +381,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1406,589 +392,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - 
NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - 
NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - 
NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 
-} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -1999,13 +406,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2016,168 +416,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - 
CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2308,182 +558,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - 
BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2499,18 +573,21 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 46c86021..aaca03dd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]int32 @@ -115,36 +101,6 @@ type Stat_t struct { Pad5 [14]int32 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime 
StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -168,126 +120,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -298,41 +135,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -349,390 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - 
Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - 
RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc -) - -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -764,15 +197,6 @@ type Sysinfo_t struct { _ [8]int8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -788,35 +212,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - 
AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -825,33 +221,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -863,13 +232,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -919,279 +281,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ 
[948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 
0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1267,22 +363,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1290,88 +370,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1389,18 +387,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1411,589 +397,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - 
Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE 
= 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - 
NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - 
NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x18 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2004,13 +411,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2021,168 +421,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - 
NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2313,182 +563,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - 
BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader 
struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2503,18 +577,21 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]int8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index c2fe1a62..2e7f3b8c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]uint32 @@ -114,36 +100,6 @@ type Stat_t struct { Blocks int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,126 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type 
RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -296,41 +133,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -348,390 +155,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - 
NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 
0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 + SizeofSockFprog = 0x10 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -764,15 +197,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -783,40 +207,13 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -825,33 +222,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -863,13 +233,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -917,279 +280,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - 
TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = 
CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1265,22 +362,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type 
HDGeometry struct { Heads uint8 Sectors uint8 @@ -1288,88 +369,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1385,18 +384,6 @@ type Statfs_t struct { Spare [5]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1408,589 +395,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - 
-const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - 
NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 
- NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - 
NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2001,13 +409,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2018,168 +419,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK 
= 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2310,182 +561,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 
0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2501,18 +576,21 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index f1eb0d39..16add5a2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go 
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]uint32 @@ -114,36 +100,6 @@ type Stat_t struct { Blocks int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,126 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -296,41 +133,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - 
Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -348,390 +155,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - 
IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - 
SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 + SizeofSockFprog = 0x10 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -764,15 +197,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -783,40 +207,13 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -825,33 +222,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -863,13 +233,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -917,279 +280,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 
0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - 
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1265,22 +362,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1288,88 +369,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - 
Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1385,18 +384,6 @@ type Statfs_t struct { Spare [5]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1408,589 +395,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 
- NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - 
NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - 
NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2001,13 +409,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2018,168 +419,18 @@ 
type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - 
type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2310,182 +561,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN 
= 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2501,18 +576,21 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 8759bc36..4ed2c8e5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]int32 @@ -115,36 +101,6 @@ type Stat_t struct { Pad5 [14]int32 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -168,126 +120,11 @@ 
type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -298,41 +135,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -349,390 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type 
CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - 
RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc -) - -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -764,15 +197,6 @@ type Sysinfo_t struct { _ [8]int8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -788,35 +212,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -825,33 +221,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo 
uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -863,13 +232,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -919,279 +281,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - 
PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 
- PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1267,22 +363,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1290,88 +370,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1389,18 +387,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1411,589 +397,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr 
[40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - 
NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - 
NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - 
NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x18 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2004,13 +411,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2021,168 +421,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - 
NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2313,182 +563,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 
0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 
0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2503,18 +577,21 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]int8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a8120054..74151909 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -115,36 +101,6 @@ type Stat_t struct { _ uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -167,126 +119,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - 
Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -297,41 +134,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -349,390 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - 
NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - 
Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Gpr [32]uint64 Nip uint64 @@ -771,15 +204,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -796,35 +220,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -833,33 +229,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -871,13 +240,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -925,279 +287,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait 
uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - 
PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1273,22 +369,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1296,88 +376,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - 
Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1393,18 +391,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1416,589 +402,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - 
NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - 
NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - 
NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 
0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x20 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2009,13 +416,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2026,168 +426,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x20001269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - 
CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2318,182 +568,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - 
BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint64 @@ -2509,18 +583,21 @@ type LoopInfo struct { Reserved [4]uint8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 74b7a919..046c2deb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { 
Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -115,36 +101,6 @@ type Stat_t struct { _ uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -167,126 +119,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -297,41 +134,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -349,390 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr 
[4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - 
IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Gpr [32]uint64 Nip uint64 @@ -771,15 +204,6 @@ type 
Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -796,35 +220,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -833,33 +229,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -871,13 +240,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -925,279 +287,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - 
Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - 
PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1273,22 +369,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1296,88 +376,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1393,18 +391,6 @@ type Statfs_t struct { 
Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1416,589 +402,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - 
NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - 
NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - 
NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 + SizeofTpacketHdr = 0x20 ) -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2009,13 +416,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2026,168 +426,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x20001269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr 
uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2318,182 +568,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - 
BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - 
BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint64 @@ -2509,18 +583,21 @@ type LoopInfo struct { Reserved [4]uint8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 8344583e..0f2f61a6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -114,36 +100,6 @@ type Stat_t struct { _ [2]int32 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,126 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - 
FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -296,41 +133,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -348,390 +155,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - 
SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - 
RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 + SizeofSockFprog = 0x10 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Pc uint64 Ra uint64 @@ -789,15 +222,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -808,40 +232,13 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -850,33 +247,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ 
-888,13 +258,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -942,279 +305,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = 
CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = 
-0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1290,22 +387,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1313,88 +394,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1410,18 +409,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1433,589 +420,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - 
Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 
0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - 
NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - 
NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2026,13 +434,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2043,168 +444,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - 
SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2335,182 +586,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC 
= 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2526,18 +601,21 @@ type LoopInfo struct { Reserved [4]uint8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - 
File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d8fc0bc1..cca1b6be 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -113,36 +99,6 @@ type Stat_t struct { _ [3]int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -152,10 +108,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -165,126 +117,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x6 - FADV_NOREUSE = 0x7 + FADV_DONTNEED = 0x6 + FADV_NOREUSE = 0x7 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct 
{ - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -295,41 +132,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -347,390 +154,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS 
= 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - 
Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 + SizeofSockFprog = 0x10 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Psw PtracePsw Gprs [16]uint64 @@ -784,15 +217,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -809,35 +233,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -846,33 +242,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -884,13 +253,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -938,279 +300,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 
- CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - 
PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1286,22 +382,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1309,88 +389,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - 
Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type uint32 Bsize uint32 @@ -1407,18 +405,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1430,589 +416,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 
0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - 
NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 
- NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - 
Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2023,13 +430,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2040,168 +440,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - 
CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2332,182 +582,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - 
BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint16 @@ -2523,18 +597,21 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 5e0ab932..33a73bf1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -89,13 +82,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 _ uint16 @@ -116,36 +102,6 @@ type Stat_t struct { _ uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 
- Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -155,10 +111,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -169,126 +121,11 @@ type Flock_t struct { _ [2]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -299,41 +136,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -351,390 +158,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - 
Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - 
RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 + SizeofSockFprog = 0x10 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [16]uint64 Tstate uint64 @@ -766,15 +199,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -791,35 +215,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - 
AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x800 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -828,33 +224,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -866,13 +235,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -920,279 +282,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities 
uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - 
PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1268,22 +364,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1291,88 +371,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1388,18 +386,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1411,589 +397,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - 
Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - 
NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - 
NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - 
NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2004,13 +411,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2021,168 +421,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - 
NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2313,182 +563,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 
0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - 
BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2504,18 +578,21 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 86736ab6..a89100c0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -78,6 +78,33 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint32 + Bsize uint32 + Frsize uint32 + Iosize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint32 + Namemax uint32 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte +} + type Flock_t struct { Start int64 Len int64 @@ -103,6 +130,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 3427811f..289184e0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -82,6 +82,34 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte + _ [4]byte +} + type Flock_t struct { Start int64 Len int64 @@ -107,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 399f37a4..428c450e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -83,6 +83,33 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint32 + Bsize uint32 + Frsize uint32 + Iosize uint32 + Blocks uint64 + 
Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint32 + Namemax uint32 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte +} + type Flock_t struct { Start int64 Len int64 @@ -108,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 32f0c15d..6f1f2842 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -82,6 +82,34 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte + _ [4]byte +} + type Flock_t struct { Start int64 Len int64 @@ -107,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 8531a190..23ed9fe5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -211,6 +211,12 @@ type Cmsghdr struct { Type int32 } +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + type Inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 @@ -236,6 +242,7 @@ const ( SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc SizeofInet6Pktinfo = 0x14 SizeofIPv6MTUInfo = 0x24 SizeofICMPv6Filter = 0x20 diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index 520b9ada..48ec64b4 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) { // Transform the remaining input, growing dst and src buffers as necessary. for { n := copy(src, s[pSrc:]) - nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + atEOF := pSrc+n == len(s) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF) pDst += nDst pSrc += nSrc @@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) { dst = grow(dst, pDst) } } else if err == ErrShortSrc { + if atEOF { + return string(dst[:pDst]), pSrc, err + } if nSrc == 0 { src = grow(src, 0) } diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 48d14400..50deb660 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() { // Rule W1. // Changes all NSMs. 
- preceedingCharacterType := s.sos + precedingCharacterType := s.sos for i, t := range s.types { if t == NSM { - s.types[i] = preceedingCharacterType + s.types[i] = precedingCharacterType } else { if t.in(LRI, RLI, FSI, PDI) { - preceedingCharacterType = ON + precedingCharacterType = ON } - preceedingCharacterType = t + precedingCharacterType = t } } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 022e3c69..16b11db5 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13 +// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go new file mode 100644 index 00000000..7ffa3651 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -0,0 +1,1923 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.14 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16896 bytes (16.50 KiB). Checksum: 6f0927067913dc6d. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 240 blocks, 15360 entries, 15360 bytes +// The third block is the zero block. 
+var bidiValues = [15360]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 
0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7a: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 
0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 
0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 
0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 
0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 
0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 
0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 
0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 
0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19af: 0x000c, + 0x19b0: 0x000c, 0x19b1: 0x000c, + 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19ff: 0x000c, + // Block 0x68, offset 0x1a00 + 0x1a20: 0x000c, 0x1a21: 0x000c, 0x1a22: 0x000c, 0x1a23: 0x000c, + 0x1a24: 0x000c, 0x1a25: 0x000c, 0x1a26: 0x000c, 0x1a27: 0x000c, 0x1a28: 0x000c, 0x1a29: 0x000c, + 0x1a2a: 0x000c, 0x1a2b: 0x000c, 0x1a2c: 0x000c, 0x1a2d: 0x000c, 0x1a2e: 0x000c, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, 0x1a32: 0x000c, 0x1a33: 0x000c, 0x1a34: 0x000c, 0x1a35: 0x000c, + 0x1a36: 0x000c, 0x1a37: 0x000c, 0x1a38: 0x000c, 0x1a39: 0x000c, 0x1a3a: 0x000c, 0x1a3b: 0x000c, + 0x1a3c: 0x000c, 0x1a3d: 0x000c, 0x1a3e: 0x000c, 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x000a, 0x1a41: 0x000a, 0x1a42: 0x000a, 0x1a43: 0x000a, 0x1a44: 0x000a, 0x1a45: 0x000a, + 0x1a46: 0x000a, 0x1a47: 0x000a, 0x1a48: 0x000a, 0x1a49: 0x000a, 0x1a4a: 0x000a, 0x1a4b: 0x000a, + 0x1a4c: 0x000a, 0x1a4d: 0x000a, 0x1a4e: 0x000a, 0x1a4f: 0x000a, 0x1a50: 0x000a, 0x1a51: 0x000a, + 0x1a52: 0x000a, 0x1a53: 0x000a, 0x1a54: 0x000a, 0x1a55: 0x000a, 0x1a56: 0x000a, 0x1a57: 0x000a, + 0x1a58: 0x000a, 0x1a59: 0x000a, 0x1a5a: 0x000a, 0x1a5b: 0x000a, 0x1a5c: 0x000a, 0x1a5d: 0x000a, + 0x1a5e: 0x000a, 0x1a5f: 0x000a, 0x1a60: 0x000a, 0x1a61: 0x000a, 0x1a62: 0x003a, 0x1a63: 0x002a, + 0x1a64: 0x003a, 0x1a65: 0x002a, 0x1a66: 0x003a, 0x1a67: 0x002a, 0x1a68: 0x003a, 0x1a69: 0x002a, + 0x1a6a: 0x000a, 0x1a6b: 0x000a, 0x1a6c: 0x000a, 0x1a6d: 0x000a, 0x1a6e: 0x000a, 0x1a6f: 0x000a, + 0x1a70: 0x000a, 0x1a71: 0x000a, 0x1a72: 0x000a, 0x1a73: 0x000a, 0x1a74: 0x000a, 0x1a75: 0x000a, + 0x1a76: 0x000a, 0x1a77: 0x000a, 0x1a78: 0x000a, 0x1a79: 0x000a, 0x1a7a: 0x000a, 0x1a7b: 0x000a, + 0x1a7c: 0x000a, 0x1a7d: 0x000a, 0x1a7e: 0x000a, 0x1a7f: 0x000a, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x000a, 0x1ae3: 0x000a, + 0x1ae4: 0x000a, 0x1ae5: 0x000a, 0x1ae6: 0x000a, 0x1ae7: 0x000a, 0x1ae8: 0x000a, 0x1ae9: 0x000a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 
0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1a: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x0009, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, + 0x1b88: 0x003a, 0x1b89: 0x002a, 0x1b8a: 0x003a, 0x1b8b: 0x002a, + 0x1b8c: 0x003a, 0x1b8d: 0x002a, 0x1b8e: 0x003a, 0x1b8f: 0x002a, 0x1b90: 0x003a, 0x1b91: 0x002a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x003a, 0x1b95: 0x002a, 0x1b96: 0x003a, 0x1b97: 0x002a, + 0x1b98: 0x003a, 0x1b99: 0x002a, 0x1b9a: 0x003a, 0x1b9b: 0x002a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, + 0x1baa: 0x000c, 0x1bab: 0x000c, 0x1bac: 0x000c, 0x1bad: 0x000c, + 0x1bb0: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, + 0x1bbd: 0x000a, 0x1bbe: 0x000a, 0x1bbf: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bd9: 0x000c, 0x1bda: 0x000c, 0x1bdb: 0x000a, 0x1bdc: 0x000a, + 0x1be0: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c3b: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x000a, 0x1c41: 0x000a, 0x1c42: 0x000a, 0x1c43: 0x000a, 0x1c44: 0x000a, 0x1c45: 0x000a, + 0x1c46: 0x000a, 0x1c47: 0x000a, 0x1c48: 0x000a, 0x1c49: 0x000a, 0x1c4a: 0x000a, 0x1c4b: 0x000a, + 0x1c4c: 0x000a, 0x1c4d: 0x000a, 0x1c4e: 0x000a, 0x1c4f: 0x000a, 0x1c50: 0x000a, 0x1c51: 0x000a, + 0x1c52: 0x000a, 0x1c53: 0x000a, 0x1c54: 0x000a, 0x1c55: 0x000a, 0x1c56: 0x000a, 0x1c57: 0x000a, + 0x1c58: 0x000a, 0x1c59: 0x000a, 0x1c5a: 0x000a, 0x1c5b: 0x000a, 0x1c5c: 0x000a, 0x1c5d: 0x000a, + 0x1c5e: 0x000a, 0x1c5f: 0x000a, 0x1c60: 0x000a, 0x1c61: 0x000a, 0x1c62: 0x000a, 0x1c63: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c9d: 0x000a, + 0x1c9e: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, + 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, + 0x1cfc: 0x000a, 0x1cfd: 0x000a, 0x1cfe: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d31: 0x000a, 0x1d32: 0x000a, 0x1d33: 0x000a, 0x1d34: 0x000a, 0x1d35: 0x000a, + 0x1d36: 0x000a, 0x1d37: 0x000a, 0x1d38: 0x000a, 0x1d39: 0x000a, 0x1d3a: 0x000a, 0x1d3b: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, 0x1d3f: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d4c: 0x000a, 0x1d4d: 0x000a, 
0x1d4e: 0x000a, 0x1d4f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dde: 0x000a, 0x1ddf: 0x000a, + 0x1dff: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e10: 0x000a, 0x1e11: 0x000a, + 0x1e12: 0x000a, 0x1e13: 0x000a, 0x1e14: 0x000a, 0x1e15: 0x000a, 0x1e16: 0x000a, 0x1e17: 0x000a, + 0x1e18: 0x000a, 0x1e19: 0x000a, 0x1e1a: 0x000a, 0x1e1b: 0x000a, 0x1e1c: 0x000a, 0x1e1d: 0x000a, + 0x1e1e: 0x000a, 0x1e1f: 0x000a, 0x1e20: 0x000a, 0x1e21: 0x000a, 0x1e22: 0x000a, 0x1e23: 0x000a, + 0x1e24: 0x000a, 0x1e25: 0x000a, 0x1e26: 0x000a, 0x1e27: 0x000a, 0x1e28: 0x000a, 0x1e29: 0x000a, + 0x1e2a: 0x000a, 0x1e2b: 0x000a, 0x1e2c: 0x000a, 0x1e2d: 0x000a, 0x1e2e: 0x000a, 0x1e2f: 0x000a, + 0x1e30: 0x000a, 0x1e31: 0x000a, 0x1e32: 0x000a, 0x1e33: 0x000a, 0x1e34: 0x000a, 0x1e35: 0x000a, + 0x1e36: 0x000a, 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, 0x1e3b: 0x000a, + 0x1e3c: 0x000a, 0x1e3d: 0x000a, 0x1e3e: 0x000a, 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x000a, 0x1e41: 0x000a, 0x1e42: 0x000a, 0x1e43: 0x000a, 0x1e44: 0x000a, 0x1e45: 0x000a, + 0x1e46: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e8d: 0x000a, 0x1e8e: 0x000a, 0x1e8f: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1eef: 0x000c, + 0x1ef0: 0x000c, 0x1ef1: 0x000c, 0x1ef2: 0x000c, 0x1ef3: 0x000a, 0x1ef4: 0x000c, 0x1ef5: 0x000c, + 0x1ef6: 0x000c, 0x1ef7: 0x000c, 0x1ef8: 0x000c, 0x1ef9: 0x000c, 0x1efa: 0x000c, 0x1efb: 0x000c, + 0x1efc: 0x000c, 0x1efd: 0x000c, 0x1efe: 0x000a, 0x1eff: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f1e: 0x000c, 0x1f1f: 0x000c, + // Block 0x7d, offset 0x1f40 + 0x1f70: 0x000c, 0x1f71: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0x000a, 0x1f81: 0x000a, 0x1f82: 0x000a, 0x1f83: 0x000a, 0x1f84: 0x000a, 0x1f85: 0x000a, + 0x1f86: 0x000a, 0x1f87: 0x000a, 0x1f88: 0x000a, 0x1f89: 0x000a, 0x1f8a: 0x000a, 0x1f8b: 0x000a, + 0x1f8c: 0x000a, 0x1f8d: 0x000a, 0x1f8e: 0x000a, 0x1f8f: 0x000a, 0x1f90: 0x000a, 0x1f91: 0x000a, + 0x1f92: 0x000a, 0x1f93: 0x000a, 0x1f94: 0x000a, 0x1f95: 0x000a, 0x1f96: 0x000a, 0x1f97: 0x000a, + 0x1f98: 0x000a, 0x1f99: 0x000a, 0x1f9a: 0x000a, 0x1f9b: 0x000a, 0x1f9c: 0x000a, 0x1f9d: 0x000a, + 0x1f9e: 0x000a, 0x1f9f: 0x000a, 0x1fa0: 0x000a, 0x1fa1: 0x000a, + // Block 0x7f, offset 0x1fc0 + 0x1fc8: 0x000a, + // Block 0x80, offset 0x2000 + 0x2002: 0x000c, + 0x2006: 0x000c, 0x200b: 0x000c, + 0x2025: 0x000c, 0x2026: 0x000c, 0x2028: 0x000a, 0x2029: 0x000a, + 0x202a: 0x000a, 0x202b: 0x000a, + 0x2038: 0x0004, 0x2039: 0x0004, + // Block 0x81, offset 0x2040 + 0x2074: 0x000a, 0x2075: 0x000a, + 0x2076: 0x000a, 0x2077: 0x000a, + // Block 0x82, offset 0x2080 + 0x2084: 0x000c, 0x2085: 0x000c, + 0x20a0: 0x000c, 0x20a1: 0x000c, 0x20a2: 0x000c, 0x20a3: 0x000c, + 0x20a4: 0x000c, 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a7: 0x000c, 0x20a8: 0x000c, 0x20a9: 0x000c, + 0x20aa: 0x000c, 0x20ab: 0x000c, 0x20ac: 0x000c, 0x20ad: 0x000c, 0x20ae: 0x000c, 0x20af: 0x000c, + 0x20b0: 0x000c, 0x20b1: 0x000c, + 0x20bf: 0x000c, + // Block 0x83, offset 0x20c0 + 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, + // Block 0x84, offset 0x2100 + 0x2107: 0x000c, 0x2108: 0x000c, 0x2109: 0x000c, 0x210a: 0x000c, 0x210b: 0x000c, + 0x210c: 0x000c, 0x210d: 0x000c, 0x210e: 0x000c, 0x210f: 0x000c, 0x2110: 0x000c, 0x2111: 0x000c, + // Block 0x85, offset 0x2140 + 0x2140: 0x000c, 0x2141: 0x000c, 0x2142: 0x000c, + 0x2173: 0x000c, + 0x2176: 0x000c, 0x2177: 0x000c, 0x2178: 0x000c, 
0x2179: 0x000c, + 0x217c: 0x000c, 0x217d: 0x000c, + // Block 0x86, offset 0x2180 + 0x21a5: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e9: 0x000c, + 0x21ea: 0x000c, 0x21eb: 0x000c, 0x21ec: 0x000c, 0x21ed: 0x000c, 0x21ee: 0x000c, + 0x21f1: 0x000c, 0x21f2: 0x000c, 0x21f5: 0x000c, + 0x21f6: 0x000c, + // Block 0x88, offset 0x2200 + 0x2203: 0x000c, + 0x220c: 0x000c, + 0x223c: 0x000c, + // Block 0x89, offset 0x2240 + 0x2270: 0x000c, 0x2272: 0x000c, 0x2273: 0x000c, 0x2274: 0x000c, + 0x2277: 0x000c, 0x2278: 0x000c, + 0x227e: 0x000c, 0x227f: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2281: 0x000c, + 0x22ac: 0x000c, 0x22ad: 0x000c, + 0x22b6: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22e5: 0x000c, 0x22e8: 0x000c, + 0x22ed: 0x000c, + // Block 0x8c, offset 0x2300 + 0x231d: 0x0001, + 0x231e: 0x000c, 0x231f: 0x0001, 0x2320: 0x0001, 0x2321: 0x0001, 0x2322: 0x0001, 0x2323: 0x0001, + 0x2324: 0x0001, 0x2325: 0x0001, 0x2326: 0x0001, 0x2327: 0x0001, 0x2328: 0x0001, 0x2329: 0x0003, + 0x232a: 0x0001, 0x232b: 0x0001, 0x232c: 0x0001, 0x232d: 0x0001, 0x232e: 0x0001, 0x232f: 0x0001, + 0x2330: 0x0001, 0x2331: 0x0001, 0x2332: 0x0001, 0x2333: 0x0001, 0x2334: 0x0001, 0x2335: 0x0001, + 0x2336: 0x0001, 0x2337: 0x0001, 0x2338: 0x0001, 0x2339: 0x0001, 0x233a: 0x0001, 0x233b: 0x0001, + 0x233c: 0x0001, 0x233d: 0x0001, 0x233e: 0x0001, 0x233f: 0x0001, + // Block 0x8d, offset 0x2340 + 0x2340: 0x0001, 0x2341: 0x0001, 0x2342: 0x0001, 0x2343: 0x0001, 0x2344: 0x0001, 0x2345: 0x0001, + 0x2346: 0x0001, 0x2347: 0x0001, 0x2348: 0x0001, 0x2349: 0x0001, 0x234a: 0x0001, 0x234b: 0x0001, + 0x234c: 0x0001, 0x234d: 0x0001, 0x234e: 0x0001, 0x234f: 0x0001, 0x2350: 0x000d, 0x2351: 0x000d, + 0x2352: 0x000d, 0x2353: 0x000d, 0x2354: 0x000d, 0x2355: 0x000d, 0x2356: 0x000d, 0x2357: 0x000d, + 0x2358: 0x000d, 0x2359: 0x000d, 0x235a: 0x000d, 0x235b: 0x000d, 0x235c: 0x000d, 0x235d: 0x000d, + 0x235e: 0x000d, 0x235f: 0x000d, 0x2360: 0x000d, 0x2361: 0x000d, 0x2362: 0x000d, 0x2363: 0x000d, + 0x2364: 0x000d, 0x2365: 0x000d, 0x2366: 0x000d, 0x2367: 0x000d, 0x2368: 0x000d, 0x2369: 0x000d, + 0x236a: 0x000d, 0x236b: 0x000d, 0x236c: 0x000d, 0x236d: 0x000d, 0x236e: 0x000d, 0x236f: 0x000d, + 0x2370: 0x000d, 0x2371: 0x000d, 0x2372: 0x000d, 0x2373: 0x000d, 0x2374: 0x000d, 0x2375: 0x000d, + 0x2376: 0x000d, 0x2377: 0x000d, 0x2378: 0x000d, 0x2379: 0x000d, 0x237a: 0x000d, 0x237b: 0x000d, + 0x237c: 0x000d, 0x237d: 0x000d, 0x237e: 0x000d, 0x237f: 0x000d, + // Block 0x8e, offset 0x2380 + 0x2380: 0x000d, 0x2381: 0x000d, 0x2382: 0x000d, 0x2383: 0x000d, 0x2384: 0x000d, 0x2385: 0x000d, + 0x2386: 0x000d, 0x2387: 0x000d, 0x2388: 0x000d, 0x2389: 0x000d, 0x238a: 0x000d, 0x238b: 0x000d, + 0x238c: 0x000d, 0x238d: 0x000d, 0x238e: 0x000d, 0x238f: 0x000d, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000a, 0x23bf: 0x000a, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 
0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000b, 0x23d1: 0x000b, + 0x23d2: 0x000b, 0x23d3: 0x000b, 0x23d4: 0x000b, 0x23d5: 0x000b, 0x23d6: 0x000b, 0x23d7: 0x000b, + 0x23d8: 0x000b, 0x23d9: 0x000b, 0x23da: 0x000b, 0x23db: 0x000b, 0x23dc: 0x000b, 0x23dd: 0x000b, + 0x23de: 0x000b, 0x23df: 0x000b, 0x23e0: 0x000b, 0x23e1: 0x000b, 0x23e2: 0x000b, 0x23e3: 0x000b, + 0x23e4: 0x000b, 0x23e5: 0x000b, 0x23e6: 0x000b, 0x23e7: 0x000b, 0x23e8: 0x000b, 0x23e9: 0x000b, + 0x23ea: 0x000b, 0x23eb: 0x000b, 0x23ec: 0x000b, 0x23ed: 0x000b, 0x23ee: 0x000b, 0x23ef: 0x000b, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000a, 0x23fe: 0x000d, 0x23ff: 0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000c, 0x2401: 0x000c, 0x2402: 0x000c, 0x2403: 0x000c, 0x2404: 0x000c, 0x2405: 0x000c, + 0x2406: 0x000c, 0x2407: 0x000c, 0x2408: 0x000c, 0x2409: 0x000c, 0x240a: 0x000c, 0x240b: 0x000c, + 0x240c: 0x000c, 0x240d: 0x000c, 0x240e: 0x000c, 0x240f: 0x000c, 0x2410: 0x000a, 0x2411: 0x000a, + 0x2412: 0x000a, 0x2413: 0x000a, 0x2414: 0x000a, 0x2415: 0x000a, 0x2416: 0x000a, 0x2417: 0x000a, + 0x2418: 0x000a, 0x2419: 0x000a, + 0x2420: 0x000c, 0x2421: 0x000c, 0x2422: 0x000c, 0x2423: 0x000c, + 0x2424: 0x000c, 0x2425: 0x000c, 0x2426: 0x000c, 0x2427: 0x000c, 0x2428: 0x000c, 0x2429: 0x000c, + 0x242a: 0x000c, 0x242b: 0x000c, 0x242c: 0x000c, 0x242d: 0x000c, 0x242e: 0x000c, 0x242f: 0x000c, + 0x2430: 0x000a, 0x2431: 0x000a, 0x2432: 0x000a, 0x2433: 0x000a, 0x2434: 0x000a, 0x2435: 0x000a, + 0x2436: 0x000a, 0x2437: 0x000a, 0x2438: 0x000a, 0x2439: 0x000a, 0x243a: 0x000a, 0x243b: 0x000a, + 0x243c: 0x000a, 0x243d: 0x000a, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000a, 0x2441: 0x000a, 0x2442: 0x000a, 0x2443: 0x000a, 0x2444: 0x000a, 0x2445: 0x000a, + 0x2446: 0x000a, 0x2447: 0x000a, 0x2448: 0x000a, 0x2449: 0x000a, 0x244a: 0x000a, 0x244b: 0x000a, + 0x244c: 0x000a, 0x244d: 0x000a, 0x244e: 0x000a, 0x244f: 0x000a, 0x2450: 0x0006, 0x2451: 0x000a, + 0x2452: 0x0006, 0x2454: 0x000a, 0x2455: 0x0006, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x009a, 0x245a: 0x008a, 0x245b: 0x007a, 0x245c: 0x006a, 0x245d: 0x009a, + 0x245e: 0x008a, 0x245f: 0x0004, 0x2460: 0x000a, 0x2461: 0x000a, 0x2462: 0x0003, 0x2463: 0x0003, + 0x2464: 0x000a, 0x2465: 0x000a, 0x2466: 0x000a, 0x2468: 0x000a, 0x2469: 0x0004, + 0x246a: 0x0004, 0x246b: 0x000a, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000d, 0x2490: 0x000d, 0x2491: 0x000d, + 0x2492: 0x000d, 0x2493: 0x000d, 0x2494: 0x000d, 0x2495: 0x000d, 0x2496: 0x000d, 0x2497: 0x000d, + 0x2498: 0x000d, 0x2499: 0x000d, 0x249a: 0x000d, 0x249b: 0x000d, 0x249c: 0x000d, 0x249d: 0x000d, + 0x249e: 0x000d, 0x249f: 0x000d, 0x24a0: 0x000d, 
0x24a1: 0x000d, 0x24a2: 0x000d, 0x24a3: 0x000d, + 0x24a4: 0x000d, 0x24a5: 0x000d, 0x24a6: 0x000d, 0x24a7: 0x000d, 0x24a8: 0x000d, 0x24a9: 0x000d, + 0x24aa: 0x000d, 0x24ab: 0x000d, 0x24ac: 0x000d, 0x24ad: 0x000d, 0x24ae: 0x000d, 0x24af: 0x000d, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000b, + // Block 0x93, offset 0x24c0 + 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x0004, 0x24c4: 0x0004, 0x24c5: 0x0004, + 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x003a, 0x24c9: 0x002a, 0x24ca: 0x000a, 0x24cb: 0x0003, + 0x24cc: 0x0006, 0x24cd: 0x0003, 0x24ce: 0x0006, 0x24cf: 0x0006, 0x24d0: 0x0002, 0x24d1: 0x0002, + 0x24d2: 0x0002, 0x24d3: 0x0002, 0x24d4: 0x0002, 0x24d5: 0x0002, 0x24d6: 0x0002, 0x24d7: 0x0002, + 0x24d8: 0x0002, 0x24d9: 0x0002, 0x24da: 0x0006, 0x24db: 0x000a, 0x24dc: 0x000a, 0x24dd: 0x000a, + 0x24de: 0x000a, 0x24df: 0x000a, 0x24e0: 0x000a, + 0x24fb: 0x005a, + 0x24fc: 0x000a, 0x24fd: 0x004a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, + 0x251b: 0x005a, 0x251c: 0x000a, 0x251d: 0x004a, + 0x251e: 0x000a, 0x251f: 0x00fa, 0x2520: 0x00ea, 0x2521: 0x000a, 0x2522: 0x003a, 0x2523: 0x002a, + 0x2524: 0x000a, 0x2525: 0x000a, + // Block 0x95, offset 0x2540 + 0x2560: 0x0004, 0x2561: 0x0004, 0x2562: 0x000a, 0x2563: 0x000a, + 0x2564: 0x000a, 0x2565: 0x0004, 0x2566: 0x0004, 0x2568: 0x000a, 0x2569: 0x000a, + 0x256a: 0x000a, 0x256b: 0x000a, 0x256c: 0x000a, 0x256d: 0x000a, 0x256e: 0x000a, + 0x2570: 0x000b, 0x2571: 0x000b, 0x2572: 0x000b, 0x2573: 0x000b, 0x2574: 0x000b, 0x2575: 0x000b, + 0x2576: 0x000b, 0x2577: 0x000b, 0x2578: 0x000b, 0x2579: 0x000a, 0x257a: 0x000a, 0x257b: 0x000a, + 0x257c: 0x000a, 0x257d: 0x000a, 0x257e: 0x000b, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, 0x25c1: 0x000a, 0x25c2: 0x000a, 0x25c3: 0x000a, 0x25c4: 0x000a, 0x25c5: 0x000a, + 0x25c6: 0x000a, 0x25c7: 0x000a, 0x25c8: 0x000a, 0x25c9: 0x000a, 0x25ca: 0x000a, 0x25cb: 0x000a, + 0x25cc: 0x000a, 0x25d0: 0x000a, 0x25d1: 0x000a, + 0x25d2: 0x000a, 0x25d3: 0x000a, 0x25d4: 0x000a, 0x25d5: 0x000a, 0x25d6: 0x000a, 0x25d7: 0x000a, + 0x25d8: 0x000a, 0x25d9: 0x000a, 0x25da: 0x000a, 0x25db: 0x000a, + 0x25e0: 0x000a, + // Block 0x98, offset 0x2600 + 0x263d: 0x000c, + // Block 0x99, offset 0x2640 + 0x2660: 0x000c, 0x2661: 0x0002, 0x2662: 0x0002, 0x2663: 0x0002, + 0x2664: 0x0002, 0x2665: 0x0002, 0x2666: 0x0002, 0x2667: 0x0002, 0x2668: 0x0002, 0x2669: 0x0002, + 0x266a: 0x0002, 0x266b: 0x0002, 0x266c: 0x0002, 0x266d: 0x0002, 0x266e: 0x0002, 0x266f: 0x0002, + 0x2670: 0x0002, 0x2671: 0x0002, 0x2672: 0x0002, 0x2673: 0x0002, 0x2674: 0x0002, 0x2675: 0x0002, + 0x2676: 0x0002, 0x2677: 0x0002, 0x2678: 0x0002, 0x2679: 0x0002, 0x267a: 0x0002, 0x267b: 0x0002, + // Block 0x9a, offset 0x2680 + 0x26b6: 0x000c, 0x26b7: 0x000c, 0x26b8: 0x000c, 0x26b9: 0x000c, 0x26ba: 0x000c, + // Block 0x9b, offset 0x26c0 + 0x26c0: 0x0001, 0x26c1: 0x0001, 0x26c2: 0x0001, 0x26c3: 0x0001, 0x26c4: 0x0001, 0x26c5: 0x0001, + 0x26c6: 0x0001, 0x26c7: 0x0001, 0x26c8: 0x0001, 0x26c9: 0x0001, 0x26ca: 0x0001, 0x26cb: 0x0001, + 0x26cc: 0x0001, 0x26cd: 0x0001, 0x26ce: 0x0001, 0x26cf: 0x0001, 0x26d0: 0x0001, 0x26d1: 0x0001, + 0x26d2: 0x0001, 0x26d3: 0x0001, 0x26d4: 0x0001, 0x26d5: 0x0001, 0x26d6: 0x0001, 0x26d7: 0x0001, + 0x26d8: 0x0001, 0x26d9: 0x0001, 0x26da: 0x0001, 0x26db: 0x0001, 0x26dc: 
0x0001, 0x26dd: 0x0001, + 0x26de: 0x0001, 0x26df: 0x0001, 0x26e0: 0x0001, 0x26e1: 0x0001, 0x26e2: 0x0001, 0x26e3: 0x0001, + 0x26e4: 0x0001, 0x26e5: 0x0001, 0x26e6: 0x0001, 0x26e7: 0x0001, 0x26e8: 0x0001, 0x26e9: 0x0001, + 0x26ea: 0x0001, 0x26eb: 0x0001, 0x26ec: 0x0001, 0x26ed: 0x0001, 0x26ee: 0x0001, 0x26ef: 0x0001, + 0x26f0: 0x0001, 0x26f1: 0x0001, 0x26f2: 0x0001, 0x26f3: 0x0001, 0x26f4: 0x0001, 0x26f5: 0x0001, + 0x26f6: 0x0001, 0x26f7: 0x0001, 0x26f8: 0x0001, 0x26f9: 0x0001, 0x26fa: 0x0001, 0x26fb: 0x0001, + 0x26fc: 0x0001, 0x26fd: 0x0001, 0x26fe: 0x0001, 0x26ff: 0x0001, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x000a, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x000c, 0x2742: 0x000c, 0x2743: 0x000c, 0x2744: 0x0001, 0x2745: 0x000c, + 0x2746: 0x000c, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x000c, 0x274d: 0x000c, 0x274e: 0x000c, 0x274f: 0x000c, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x000c, 0x27a6: 0x000c, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 
0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x000a, 0x27fa: 0x000a, 0x27fb: 0x000a, + 0x27fc: 0x000a, 0x27fd: 0x000a, 0x27fe: 0x000a, 0x27ff: 0x000a, + // Block 0xa0, offset 0x2800 + 0x2800: 0x000d, 0x2801: 0x000d, 0x2802: 0x000d, 0x2803: 0x000d, 0x2804: 0x000d, 0x2805: 0x000d, + 0x2806: 0x000d, 0x2807: 0x000d, 0x2808: 0x000d, 0x2809: 0x000d, 0x280a: 0x000d, 0x280b: 0x000d, + 0x280c: 0x000d, 0x280d: 0x000d, 0x280e: 0x000d, 0x280f: 0x000d, 0x2810: 0x000d, 0x2811: 0x000d, + 0x2812: 0x000d, 0x2813: 0x000d, 0x2814: 0x000d, 0x2815: 0x000d, 0x2816: 0x000d, 0x2817: 0x000d, + 0x2818: 0x000d, 0x2819: 0x000d, 0x281a: 0x000d, 0x281b: 0x000d, 0x281c: 0x000d, 0x281d: 0x000d, + 0x281e: 0x000d, 0x281f: 0x000d, 0x2820: 0x000d, 0x2821: 0x000d, 0x2822: 0x000d, 0x2823: 0x000d, + 0x2824: 0x000c, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x000c, 0x2828: 0x000d, 0x2829: 0x000d, + 0x282a: 0x000d, 0x282b: 0x000d, 0x282c: 0x000d, 0x282d: 0x000d, 0x282e: 0x000d, 0x282f: 0x000d, + 0x2830: 0x0005, 0x2831: 0x0005, 0x2832: 0x0005, 0x2833: 0x0005, 0x2834: 0x0005, 0x2835: 0x0005, + 0x2836: 0x0005, 0x2837: 0x0005, 0x2838: 0x0005, 0x2839: 0x0005, 0x283a: 0x000d, 0x283b: 0x000d, + 0x283c: 0x000d, 0x283d: 0x000d, 0x283e: 0x000d, 0x283f: 0x000d, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0005, 0x2861: 0x0005, 0x2862: 0x0005, 0x2863: 0x0005, + 0x2864: 0x0005, 0x2865: 0x0005, 0x2866: 0x0005, 0x2867: 0x0005, 0x2868: 0x0005, 0x2869: 0x0005, + 0x286a: 0x0005, 0x286b: 0x0005, 0x286c: 0x0005, 0x286d: 0x0005, 0x286e: 0x0005, 0x286f: 0x0005, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x0005, 0x287b: 0x0005, + 0x287c: 0x0005, 0x287d: 0x0005, 0x287e: 0x0005, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 
0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x000d, 0x28b1: 0x000d, 0x28b2: 0x000d, 0x28b3: 0x000d, 0x28b4: 0x000d, 0x28b5: 0x000d, + 0x28b6: 0x000d, 0x28b7: 0x000d, 0x28b8: 0x000d, 0x28b9: 0x000d, 0x28ba: 0x000d, 0x28bb: 0x000d, + 0x28bc: 0x000d, 0x28bd: 0x000d, 0x28be: 0x000d, 0x28bf: 0x000d, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000c, 0x28c7: 0x000c, 0x28c8: 0x000c, 0x28c9: 0x000c, 0x28ca: 0x000c, 0x28cb: 0x000c, + 0x28cc: 0x000c, 0x28cd: 0x000c, 0x28ce: 0x000c, 0x28cf: 0x000c, 0x28d0: 0x000c, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000d, 0x28e5: 0x000d, 0x28e6: 0x000d, 0x28e7: 0x000d, 0x28e8: 0x000d, 0x28e9: 0x000d, + 0x28ea: 0x000d, 0x28eb: 0x000d, 0x28ec: 0x000d, 0x28ed: 0x000d, 0x28ee: 0x000d, 0x28ef: 0x000d, + 0x28f0: 0x0001, 0x28f1: 0x0001, 0x28f2: 0x0001, 0x28f3: 0x0001, 0x28f4: 0x0001, 0x28f5: 0x0001, + 0x28f6: 0x0001, 0x28f7: 0x0001, 0x28f8: 0x0001, 0x28f9: 0x0001, 0x28fa: 0x0001, 0x28fb: 0x0001, + 0x28fc: 0x0001, 0x28fd: 0x0001, 0x28fe: 0x0001, 0x28ff: 0x0001, + // Block 0xa4, offset 0x2900 + 0x2901: 0x000c, + 0x2938: 0x000c, 0x2939: 0x000c, 0x293a: 0x000c, 0x293b: 0x000c, + 0x293c: 0x000c, 0x293d: 0x000c, 0x293e: 0x000c, 0x293f: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, 0x2942: 0x000c, 0x2943: 0x000c, 0x2944: 0x000c, 0x2945: 0x000c, + 0x2946: 0x000c, + 0x2952: 0x000a, 0x2953: 0x000a, 0x2954: 0x000a, 0x2955: 0x000a, 0x2956: 0x000a, 0x2957: 0x000a, + 0x2958: 0x000a, 0x2959: 0x000a, 0x295a: 0x000a, 0x295b: 0x000a, 0x295c: 0x000a, 0x295d: 0x000a, + 0x295e: 0x000a, 0x295f: 0x000a, 0x2960: 0x000a, 0x2961: 0x000a, 0x2962: 0x000a, 0x2963: 0x000a, + 0x2964: 0x000a, 0x2965: 0x000a, + 0x297f: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, + 0x29b3: 0x000c, 0x29b4: 0x000c, 0x29b5: 0x000c, + 0x29b6: 0x000c, 0x29b9: 0x000c, 0x29ba: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, 0x29c2: 0x000c, + 0x29e7: 0x000c, 0x29e8: 0x000c, 0x29e9: 0x000c, + 0x29ea: 0x000c, 0x29eb: 0x000c, 0x29ed: 0x000c, 0x29ee: 0x000c, 0x29ef: 0x000c, + 0x29f0: 0x000c, 0x29f1: 0x000c, 0x29f2: 0x000c, 0x29f3: 0x000c, 0x29f4: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a33: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x000c, 0x2a41: 0x000c, + 0x2a76: 0x000c, 0x2a77: 0x000c, 0x2a78: 0x000c, 0x2a79: 0x000c, 0x2a7a: 0x000c, 0x2a7b: 0x000c, + 0x2a7c: 0x000c, 0x2a7d: 0x000c, 0x2a7e: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a89: 0x000c, 0x2a8a: 
0x000c, 0x2a8b: 0x000c, + 0x2a8c: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2aef: 0x000c, + 0x2af0: 0x000c, 0x2af1: 0x000c, 0x2af4: 0x000c, + 0x2af6: 0x000c, 0x2af7: 0x000c, + 0x2afe: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b1f: 0x000c, 0x2b23: 0x000c, + 0x2b24: 0x000c, 0x2b25: 0x000c, 0x2b26: 0x000c, 0x2b27: 0x000c, 0x2b28: 0x000c, 0x2b29: 0x000c, + 0x2b2a: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, + 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, + 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, + 0x2bc6: 0x000c, + 0x2bde: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, + 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, + 0x2c3f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2cdc: 0x000c, 0x2cdd: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, + 0x2d3d: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, + 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, + 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, + 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, + // Block 0xb6, offset 0x2d80 + 0x2dab: 0x000c, 0x2dad: 0x000c, + 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db7: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2ddd: 0x000c, + 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, + 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, + 0x2dea: 0x000c, 0x2deb: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e2f: 0x000c, + 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e37: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, + 0x2e5a: 0x000c, 0x2e5b: 0x000c, + 0x2e60: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 0x2e85: 0x000c, + 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c, + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec7: 0x000c, + 0x2ed1: 0x000c, + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, + 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0a: 0x000c, 0x2f0b: 0x000c, + 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c, + 0x2f18: 0x000c, 0x2f19: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c, + 
0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7d: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c, + 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c, + 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c, + 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c, + 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, 0x2ffa: 0x000c, + 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3007: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3050: 0x000c, 0x3051: 0x000c, + 0x3055: 0x000c, 0x3057: 0x000c, + // Block 0xc2, offset 0x3080 + 0x30b3: 0x000c, 0x30b4: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30d5: 0x000a, 0x30d6: 0x000a, 0x30d7: 0x000a, + 0x30d8: 0x000a, 0x30d9: 0x000a, 0x30da: 0x000a, 0x30db: 0x000a, 0x30dc: 0x000a, 0x30dd: 0x0004, + 0x30de: 0x0004, 0x30df: 0x0004, 0x30e0: 0x0004, 0x30e1: 0x000a, 0x30e2: 0x000a, 0x30e3: 0x000a, + 0x30e4: 0x000a, 0x30e5: 0x000a, 0x30e6: 0x000a, 0x30e7: 0x000a, 0x30e8: 0x000a, 0x30e9: 0x000a, + 0x30ea: 0x000a, 0x30eb: 0x000a, 0x30ec: 0x000a, 0x30ed: 0x000a, 0x30ee: 0x000a, 0x30ef: 0x000a, + 0x30f0: 0x000a, 0x30f1: 0x000a, + // Block 0xc4, offset 0x3100 + 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3170: 0x000c, 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, + // Block 0xc6, offset 0x3180 + 0x318f: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d2: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3222: 0x000a, + // Block 0xc9, offset 0x3240 + 0x325d: 0x000c, + 0x325e: 0x000c, 0x3260: 0x000b, 0x3261: 0x000b, 0x3262: 0x000b, 0x3263: 0x000b, + // Block 0xca, offset 0x3280 + 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, + 0x32b3: 0x000b, 0x32b4: 0x000b, 0x32b5: 0x000b, + 0x32b6: 0x000b, 0x32b7: 0x000b, 0x32b8: 0x000b, 0x32b9: 0x000b, 0x32ba: 0x000b, 0x32bb: 0x000c, + 0x32bc: 0x000c, 0x32bd: 0x000c, 0x32be: 0x000c, 0x32bf: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x000c, 0x32c1: 0x000c, 0x32c2: 0x000c, 0x32c5: 0x000c, + 0x32c6: 0x000c, 0x32c7: 0x000c, 0x32c8: 0x000c, 0x32c9: 0x000c, 0x32ca: 0x000c, 0x32cb: 0x000c, + 0x32ea: 0x000c, 0x32eb: 0x000c, 0x32ec: 0x000c, 0x32ed: 0x000c, + // Block 0xcc, offset 0x3300 + 0x3300: 0x000a, 0x3301: 0x000a, 0x3302: 0x000c, 0x3303: 0x000c, 0x3304: 0x000c, 0x3305: 0x000a, + // Block 0xcd, offset 0x3340 + 0x3340: 0x000a, 0x3341: 0x000a, 0x3342: 0x000a, 0x3343: 0x000a, 0x3344: 0x000a, 0x3345: 0x000a, + 0x3346: 0x000a, 0x3347: 0x000a, 0x3348: 0x000a, 0x3349: 0x000a, 0x334a: 0x000a, 0x334b: 0x000a, + 0x334c: 0x000a, 0x334d: 0x000a, 0x334e: 0x000a, 0x334f: 0x000a, 0x3350: 0x000a, 0x3351: 0x000a, + 0x3352: 0x000a, 0x3353: 0x000a, 0x3354: 0x000a, 0x3355: 0x000a, 0x3356: 0x000a, + // Block 0xce, offset 0x3380 + 0x339b: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33d5: 0x000a, + // Block 0xd0, offset 0x3400 + 0x340f: 0x000a, + // Block 0xd1, offset 
0x3440 + 0x3449: 0x000a, + // Block 0xd2, offset 0x3480 + 0x3483: 0x000a, + 0x348e: 0x0002, 0x348f: 0x0002, 0x3490: 0x0002, 0x3491: 0x0002, + 0x3492: 0x0002, 0x3493: 0x0002, 0x3494: 0x0002, 0x3495: 0x0002, 0x3496: 0x0002, 0x3497: 0x0002, + 0x3498: 0x0002, 0x3499: 0x0002, 0x349a: 0x0002, 0x349b: 0x0002, 0x349c: 0x0002, 0x349d: 0x0002, + 0x349e: 0x0002, 0x349f: 0x0002, 0x34a0: 0x0002, 0x34a1: 0x0002, 0x34a2: 0x0002, 0x34a3: 0x0002, + 0x34a4: 0x0002, 0x34a5: 0x0002, 0x34a6: 0x0002, 0x34a7: 0x0002, 0x34a8: 0x0002, 0x34a9: 0x0002, + 0x34aa: 0x0002, 0x34ab: 0x0002, 0x34ac: 0x0002, 0x34ad: 0x0002, 0x34ae: 0x0002, 0x34af: 0x0002, + 0x34b0: 0x0002, 0x34b1: 0x0002, 0x34b2: 0x0002, 0x34b3: 0x0002, 0x34b4: 0x0002, 0x34b5: 0x0002, + 0x34b6: 0x0002, 0x34b7: 0x0002, 0x34b8: 0x0002, 0x34b9: 0x0002, 0x34ba: 0x0002, 0x34bb: 0x0002, + 0x34bc: 0x0002, 0x34bd: 0x0002, 0x34be: 0x0002, 0x34bf: 0x0002, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c, + 0x34c6: 0x000c, 0x34c7: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c, + 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c, + 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c, + 0x34d8: 0x000c, 0x34d9: 0x000c, 0x34da: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c, + 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e2: 0x000c, 0x34e3: 0x000c, + 0x34e4: 0x000c, 0x34e5: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c, + 0x34ea: 0x000c, 0x34eb: 0x000c, 0x34ec: 0x000c, 0x34ed: 0x000c, 0x34ee: 0x000c, 0x34ef: 0x000c, + 0x34f0: 0x000c, 0x34f1: 0x000c, 0x34f2: 0x000c, 0x34f3: 0x000c, 0x34f4: 0x000c, 0x34f5: 0x000c, + 0x34f6: 0x000c, 0x34fb: 0x000c, + 0x34fc: 0x000c, 0x34fd: 0x000c, 0x34fe: 0x000c, 0x34ff: 0x000c, + // Block 0xd4, offset 0x3500 + 0x3500: 0x000c, 0x3501: 0x000c, 0x3502: 0x000c, 0x3503: 0x000c, 0x3504: 0x000c, 0x3505: 0x000c, + 0x3506: 0x000c, 0x3507: 0x000c, 0x3508: 0x000c, 0x3509: 0x000c, 0x350a: 0x000c, 0x350b: 0x000c, + 0x350c: 0x000c, 0x350d: 0x000c, 0x350e: 0x000c, 0x350f: 0x000c, 0x3510: 0x000c, 0x3511: 0x000c, + 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x000c, + 0x3518: 0x000c, 0x3519: 0x000c, 0x351a: 0x000c, 0x351b: 0x000c, 0x351c: 0x000c, 0x351d: 0x000c, + 0x351e: 0x000c, 0x351f: 0x000c, 0x3520: 0x000c, 0x3521: 0x000c, 0x3522: 0x000c, 0x3523: 0x000c, + 0x3524: 0x000c, 0x3525: 0x000c, 0x3526: 0x000c, 0x3527: 0x000c, 0x3528: 0x000c, 0x3529: 0x000c, + 0x352a: 0x000c, 0x352b: 0x000c, 0x352c: 0x000c, + 0x3535: 0x000c, + // Block 0xd5, offset 0x3540 + 0x3544: 0x000c, + 0x355b: 0x000c, 0x355c: 0x000c, 0x355d: 0x000c, + 0x355e: 0x000c, 0x355f: 0x000c, 0x3561: 0x000c, 0x3562: 0x000c, 0x3563: 0x000c, + 0x3564: 0x000c, 0x3565: 0x000c, 0x3566: 0x000c, 0x3567: 0x000c, 0x3568: 0x000c, 0x3569: 0x000c, + 0x356a: 0x000c, 0x356b: 0x000c, 0x356c: 0x000c, 0x356d: 0x000c, 0x356e: 0x000c, 0x356f: 0x000c, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000c, 0x3581: 0x000c, 0x3582: 0x000c, 0x3583: 0x000c, 0x3584: 0x000c, 0x3585: 0x000c, + 0x3586: 0x000c, 0x3588: 0x000c, 0x3589: 0x000c, 0x358a: 0x000c, 0x358b: 0x000c, + 0x358c: 0x000c, 0x358d: 0x000c, 0x358e: 0x000c, 0x358f: 0x000c, 0x3590: 0x000c, 0x3591: 0x000c, + 0x3592: 0x000c, 0x3593: 0x000c, 0x3594: 0x000c, 0x3595: 0x000c, 0x3596: 0x000c, 0x3597: 0x000c, + 0x3598: 0x000c, 0x359b: 0x000c, 0x359c: 0x000c, 0x359d: 0x000c, + 0x359e: 
0x000c, 0x359f: 0x000c, 0x35a0: 0x000c, 0x35a1: 0x000c, 0x35a3: 0x000c, + 0x35a4: 0x000c, 0x35a6: 0x000c, 0x35a7: 0x000c, 0x35a8: 0x000c, 0x35a9: 0x000c, + 0x35aa: 0x000c, + // Block 0xd7, offset 0x35c0 + 0x35ec: 0x000c, 0x35ed: 0x000c, 0x35ee: 0x000c, 0x35ef: 0x000c, + 0x35ff: 0x0004, + // Block 0xd8, offset 0x3600 + 0x3600: 0x0001, 0x3601: 0x0001, 0x3602: 0x0001, 0x3603: 0x0001, 0x3604: 0x0001, 0x3605: 0x0001, + 0x3606: 0x0001, 0x3607: 0x0001, 0x3608: 0x0001, 0x3609: 0x0001, 0x360a: 0x0001, 0x360b: 0x0001, + 0x360c: 0x0001, 0x360d: 0x0001, 0x360e: 0x0001, 0x360f: 0x0001, 0x3610: 0x000c, 0x3611: 0x000c, + 0x3612: 0x000c, 0x3613: 0x000c, 0x3614: 0x000c, 0x3615: 0x000c, 0x3616: 0x000c, 0x3617: 0x0001, + 0x3618: 0x0001, 0x3619: 0x0001, 0x361a: 0x0001, 0x361b: 0x0001, 0x361c: 0x0001, 0x361d: 0x0001, + 0x361e: 0x0001, 0x361f: 0x0001, 0x3620: 0x0001, 0x3621: 0x0001, 0x3622: 0x0001, 0x3623: 0x0001, + 0x3624: 0x0001, 0x3625: 0x0001, 0x3626: 0x0001, 0x3627: 0x0001, 0x3628: 0x0001, 0x3629: 0x0001, + 0x362a: 0x0001, 0x362b: 0x0001, 0x362c: 0x0001, 0x362d: 0x0001, 0x362e: 0x0001, 0x362f: 0x0001, + 0x3630: 0x0001, 0x3631: 0x0001, 0x3632: 0x0001, 0x3633: 0x0001, 0x3634: 0x0001, 0x3635: 0x0001, + 0x3636: 0x0001, 0x3637: 0x0001, 0x3638: 0x0001, 0x3639: 0x0001, 0x363a: 0x0001, 0x363b: 0x0001, + 0x363c: 0x0001, 0x363d: 0x0001, 0x363e: 0x0001, 0x363f: 0x0001, + // Block 0xd9, offset 0x3640 + 0x3640: 0x0001, 0x3641: 0x0001, 0x3642: 0x0001, 0x3643: 0x0001, 0x3644: 0x000c, 0x3645: 0x000c, + 0x3646: 0x000c, 0x3647: 0x000c, 0x3648: 0x000c, 0x3649: 0x000c, 0x364a: 0x000c, 0x364b: 0x0001, + 0x364c: 0x0001, 0x364d: 0x0001, 0x364e: 0x0001, 0x364f: 0x0001, 0x3650: 0x0001, 0x3651: 0x0001, + 0x3652: 0x0001, 0x3653: 0x0001, 0x3654: 0x0001, 0x3655: 0x0001, 0x3656: 0x0001, 0x3657: 0x0001, + 0x3658: 0x0001, 0x3659: 0x0001, 0x365a: 0x0001, 0x365b: 0x0001, 0x365c: 0x0001, 0x365d: 0x0001, + 0x365e: 0x0001, 0x365f: 0x0001, 0x3660: 0x0001, 0x3661: 0x0001, 0x3662: 0x0001, 0x3663: 0x0001, + 0x3664: 0x0001, 0x3665: 0x0001, 0x3666: 0x0001, 0x3667: 0x0001, 0x3668: 0x0001, 0x3669: 0x0001, + 0x366a: 0x0001, 0x366b: 0x0001, 0x366c: 0x0001, 0x366d: 0x0001, 0x366e: 0x0001, 0x366f: 0x0001, + 0x3670: 0x0001, 0x3671: 0x0001, 0x3672: 0x0001, 0x3673: 0x0001, 0x3674: 0x0001, 0x3675: 0x0001, + 0x3676: 0x0001, 0x3677: 0x0001, 0x3678: 0x0001, 0x3679: 0x0001, 0x367a: 0x0001, 0x367b: 0x0001, + 0x367c: 0x0001, 0x367d: 0x0001, 0x367e: 0x0001, 0x367f: 0x0001, + // Block 0xda, offset 0x3680 + 0x3680: 0x000d, 0x3681: 0x000d, 0x3682: 0x000d, 0x3683: 0x000d, 0x3684: 0x000d, 0x3685: 0x000d, + 0x3686: 0x000d, 0x3687: 0x000d, 0x3688: 0x000d, 0x3689: 0x000d, 0x368a: 0x000d, 0x368b: 0x000d, + 0x368c: 0x000d, 0x368d: 0x000d, 0x368e: 0x000d, 0x368f: 0x000d, 0x3690: 0x0001, 0x3691: 0x0001, + 0x3692: 0x0001, 0x3693: 0x0001, 0x3694: 0x0001, 0x3695: 0x0001, 0x3696: 0x0001, 0x3697: 0x0001, + 0x3698: 0x0001, 0x3699: 0x0001, 0x369a: 0x0001, 0x369b: 0x0001, 0x369c: 0x0001, 0x369d: 0x0001, + 0x369e: 0x0001, 0x369f: 0x0001, 0x36a0: 0x0001, 0x36a1: 0x0001, 0x36a2: 0x0001, 0x36a3: 0x0001, + 0x36a4: 0x0001, 0x36a5: 0x0001, 0x36a6: 0x0001, 0x36a7: 0x0001, 0x36a8: 0x0001, 0x36a9: 0x0001, + 0x36aa: 0x0001, 0x36ab: 0x0001, 0x36ac: 0x0001, 0x36ad: 0x0001, 0x36ae: 0x0001, 0x36af: 0x0001, + 0x36b0: 0x0001, 0x36b1: 0x0001, 0x36b2: 0x0001, 0x36b3: 0x0001, 0x36b4: 0x0001, 0x36b5: 0x0001, + 0x36b6: 0x0001, 0x36b7: 0x0001, 0x36b8: 0x0001, 0x36b9: 0x0001, 0x36ba: 0x0001, 0x36bb: 0x0001, + 0x36bc: 0x0001, 0x36bd: 0x0001, 0x36be: 0x0001, 0x36bf: 0x0001, + // Block 0xdb, offset 0x36c0 + 
0x36c0: 0x000d, 0x36c1: 0x000d, 0x36c2: 0x000d, 0x36c3: 0x000d, 0x36c4: 0x000d, 0x36c5: 0x000d, + 0x36c6: 0x000d, 0x36c7: 0x000d, 0x36c8: 0x000d, 0x36c9: 0x000d, 0x36ca: 0x000d, 0x36cb: 0x000d, + 0x36cc: 0x000d, 0x36cd: 0x000d, 0x36ce: 0x000d, 0x36cf: 0x000d, 0x36d0: 0x000d, 0x36d1: 0x000d, + 0x36d2: 0x000d, 0x36d3: 0x000d, 0x36d4: 0x000d, 0x36d5: 0x000d, 0x36d6: 0x000d, 0x36d7: 0x000d, + 0x36d8: 0x000d, 0x36d9: 0x000d, 0x36da: 0x000d, 0x36db: 0x000d, 0x36dc: 0x000d, 0x36dd: 0x000d, + 0x36de: 0x000d, 0x36df: 0x000d, 0x36e0: 0x000d, 0x36e1: 0x000d, 0x36e2: 0x000d, 0x36e3: 0x000d, + 0x36e4: 0x000d, 0x36e5: 0x000d, 0x36e6: 0x000d, 0x36e7: 0x000d, 0x36e8: 0x000d, 0x36e9: 0x000d, + 0x36ea: 0x000d, 0x36eb: 0x000d, 0x36ec: 0x000d, 0x36ed: 0x000d, 0x36ee: 0x000d, 0x36ef: 0x000d, + 0x36f0: 0x000a, 0x36f1: 0x000a, 0x36f2: 0x000d, 0x36f3: 0x000d, 0x36f4: 0x000d, 0x36f5: 0x000d, + 0x36f6: 0x000d, 0x36f7: 0x000d, 0x36f8: 0x000d, 0x36f9: 0x000d, 0x36fa: 0x000d, 0x36fb: 0x000d, + 0x36fc: 0x000d, 0x36fd: 0x000d, 0x36fe: 0x000d, 0x36ff: 0x000d, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000a, 0x3701: 0x000a, 0x3702: 0x000a, 0x3703: 0x000a, 0x3704: 0x000a, 0x3705: 0x000a, + 0x3706: 0x000a, 0x3707: 0x000a, 0x3708: 0x000a, 0x3709: 0x000a, 0x370a: 0x000a, 0x370b: 0x000a, + 0x370c: 0x000a, 0x370d: 0x000a, 0x370e: 0x000a, 0x370f: 0x000a, 0x3710: 0x000a, 0x3711: 0x000a, + 0x3712: 0x000a, 0x3713: 0x000a, 0x3714: 0x000a, 0x3715: 0x000a, 0x3716: 0x000a, 0x3717: 0x000a, + 0x3718: 0x000a, 0x3719: 0x000a, 0x371a: 0x000a, 0x371b: 0x000a, 0x371c: 0x000a, 0x371d: 0x000a, + 0x371e: 0x000a, 0x371f: 0x000a, 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, 0x3726: 0x000a, 0x3727: 0x000a, 0x3728: 0x000a, 0x3729: 0x000a, + 0x372a: 0x000a, 0x372b: 0x000a, + 0x3730: 0x000a, 0x3731: 0x000a, 0x3732: 0x000a, 0x3733: 0x000a, 0x3734: 0x000a, 0x3735: 0x000a, + 0x3736: 0x000a, 0x3737: 0x000a, 0x3738: 0x000a, 0x3739: 0x000a, 0x373a: 0x000a, 0x373b: 0x000a, + 0x373c: 0x000a, 0x373d: 0x000a, 0x373e: 0x000a, 0x373f: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, + 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, 0x376d: 0x000a, 0x376e: 0x000a, + 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, 0x377a: 0x000a, 0x377b: 0x000a, + 0x377c: 0x000a, 0x377d: 0x000a, 0x377e: 0x000a, 0x377f: 0x000a, + // Block 0xde, offset 0x3780 + 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, 0x3799: 0x000a, 0x379a: 0x000a, 0x379b: 0x000a, 0x379c: 0x000a, 0x379d: 0x000a, + 0x379e: 0x000a, 0x379f: 0x000a, 0x37a0: 0x000a, 0x37a1: 0x000a, 0x37a2: 0x000a, 0x37a3: 0x000a, + 0x37a4: 0x000a, 0x37a5: 0x000a, 0x37a6: 0x000a, 0x37a7: 
0x000a, 0x37a8: 0x000a, 0x37a9: 0x000a, + 0x37aa: 0x000a, 0x37ab: 0x000a, 0x37ac: 0x000a, 0x37ad: 0x000a, 0x37ae: 0x000a, 0x37af: 0x000a, + 0x37b0: 0x000a, 0x37b1: 0x000a, 0x37b2: 0x000a, 0x37b3: 0x000a, 0x37b4: 0x000a, 0x37b5: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x0002, 0x37c1: 0x0002, 0x37c2: 0x0002, 0x37c3: 0x0002, 0x37c4: 0x0002, 0x37c5: 0x0002, + 0x37c6: 0x0002, 0x37c7: 0x0002, 0x37c8: 0x0002, 0x37c9: 0x0002, 0x37ca: 0x0002, 0x37cb: 0x000a, + 0x37cc: 0x000a, + 0x37ef: 0x000a, + // Block 0xe0, offset 0x3800 + 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, + // Block 0xe1, offset 0x3840 + 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a, + 0x3864: 0x000a, 0x3865: 0x000a, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x388c: 0x000a, 0x388d: 0x000a, 0x388e: 0x000a, 0x388f: 0x000a, 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, + 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a, + 0x38d8: 0x000a, + 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x3910: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, 0x3923: 0x000a, + 0x3924: 0x000a, 0x3925: 0x000a, 0x3926: 0x000a, 0x3927: 0x000a, 0x3928: 0x000a, 0x3929: 0x000a, + 0x392a: 0x000a, 0x392b: 0x000a, 0x392c: 0x000a, 0x392d: 0x000a, 0x392e: 0x000a, 0x392f: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + 0x3936: 0x000a, 0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, 0x393a: 0x000a, 0x393b: 0x000a, + 0x393c: 0x000a, 0x393d: 0x000a, 0x393e: 0x000a, 0x393f: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, 0x3943: 0x000a, 0x3944: 0x000a, 0x3945: 0x000a, + 0x3946: 0x000a, 0x3947: 0x000a, + 0x3950: 0x000a, 0x3951: 0x000a, + 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a, + 0x3958: 0x000a, 0x3959: 0x000a, + 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a, + 0x3964: 
0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a, + 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a, + 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a, + 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a, + 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x3980: 0x000a, 0x3981: 0x000a, 0x3982: 0x000a, 0x3983: 0x000a, 0x3984: 0x000a, 0x3985: 0x000a, + 0x3986: 0x000a, 0x3987: 0x000a, + 0x3990: 0x000a, 0x3991: 0x000a, + 0x3992: 0x000a, 0x3993: 0x000a, 0x3994: 0x000a, 0x3995: 0x000a, 0x3996: 0x000a, 0x3997: 0x000a, + 0x3998: 0x000a, 0x3999: 0x000a, 0x399a: 0x000a, 0x399b: 0x000a, 0x399c: 0x000a, 0x399d: 0x000a, + 0x399e: 0x000a, 0x399f: 0x000a, 0x39a0: 0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a, + 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a, + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39c0: 0x000a, 0x39c1: 0x000a, 0x39c2: 0x000a, 0x39c3: 0x000a, 0x39c4: 0x000a, 0x39c5: 0x000a, + 0x39c6: 0x000a, 0x39c7: 0x000a, 0x39c8: 0x000a, 0x39c9: 0x000a, 0x39ca: 0x000a, 0x39cb: 0x000a, + 0x39cd: 0x000a, 0x39ce: 0x000a, 0x39cf: 0x000a, 0x39d0: 0x000a, 0x39d1: 0x000a, + 0x39d2: 0x000a, 0x39d3: 0x000a, 0x39d4: 0x000a, 0x39d5: 0x000a, 0x39d6: 0x000a, 0x39d7: 0x000a, + 0x39d8: 0x000a, 0x39d9: 0x000a, 0x39da: 0x000a, 0x39db: 0x000a, 0x39dc: 0x000a, 0x39dd: 0x000a, + 0x39de: 0x000a, 0x39df: 0x000a, 0x39e0: 0x000a, 0x39e1: 0x000a, 0x39e2: 0x000a, 0x39e3: 0x000a, + 0x39e4: 0x000a, 0x39e5: 0x000a, 0x39e6: 0x000a, 0x39e7: 0x000a, 0x39e8: 0x000a, 0x39e9: 0x000a, + 0x39ea: 0x000a, 0x39eb: 0x000a, 0x39ec: 0x000a, 0x39ed: 0x000a, 0x39ee: 0x000a, 0x39ef: 0x000a, + 0x39f0: 0x000a, 0x39f1: 0x000a, 0x39f2: 0x000a, 0x39f3: 0x000a, 0x39f4: 0x000a, 0x39f5: 0x000a, + 0x39f6: 0x000a, 0x39f7: 0x000a, 0x39f8: 0x000a, 0x39f9: 0x000a, 0x39fa: 0x000a, 0x39fb: 0x000a, + 0x39fc: 0x000a, 0x39fd: 0x000a, 0x39fe: 0x000a, 0x39ff: 0x000a, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x000a, 0x3a01: 0x000a, 0x3a02: 0x000a, 0x3a03: 0x000a, 0x3a04: 0x000a, 0x3a05: 0x000a, + 0x3a06: 0x000a, 0x3a07: 0x000a, 0x3a08: 0x000a, 0x3a09: 0x000a, 0x3a0a: 0x000a, 0x3a0b: 0x000a, + 0x3a0c: 0x000a, 0x3a0d: 0x000a, 0x3a0e: 0x000a, 0x3a0f: 0x000a, 0x3a10: 0x000a, 0x3a11: 0x000a, + 0x3a12: 0x000a, 0x3a13: 0x000a, 0x3a14: 0x000a, 0x3a15: 0x000a, 0x3a16: 0x000a, 0x3a17: 0x000a, + 0x3a18: 0x000a, 0x3a19: 0x000a, 0x3a1a: 0x000a, 0x3a1b: 0x000a, 0x3a1c: 0x000a, 0x3a1d: 0x000a, + 0x3a1e: 0x000a, 0x3a1f: 0x000a, 0x3a20: 0x000a, 0x3a21: 0x000a, 0x3a22: 0x000a, 0x3a23: 0x000a, + 0x3a24: 0x000a, 0x3a25: 0x000a, 0x3a26: 0x000a, 0x3a27: 0x000a, 0x3a28: 0x000a, 0x3a29: 0x000a, + 0x3a2a: 0x000a, 0x3a2b: 0x000a, 0x3a2c: 0x000a, 0x3a2d: 0x000a, 0x3a2e: 0x000a, 0x3a2f: 0x000a, + 0x3a30: 0x000a, 0x3a31: 0x000a, 0x3a33: 0x000a, 0x3a34: 0x000a, 0x3a35: 0x000a, + 0x3a36: 0x000a, 0x3a3a: 0x000a, 0x3a3b: 0x000a, + 0x3a3c: 0x000a, 0x3a3d: 0x000a, 0x3a3e: 0x000a, 0x3a3f: 0x000a, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000a, 0x3a41: 0x000a, 0x3a42: 0x000a, 0x3a43: 0x000a, 0x3a44: 0x000a, 0x3a45: 0x000a, + 0x3a46: 0x000a, 0x3a47: 0x000a, 0x3a48: 0x000a, 0x3a49: 0x000a, 0x3a4a: 0x000a, 0x3a4b: 0x000a, + 0x3a4c: 0x000a, 0x3a4d: 0x000a, 0x3a4e: 0x000a, 0x3a4f: 0x000a, 0x3a50: 0x000a, 0x3a51: 0x000a, + 0x3a52: 0x000a, 0x3a53: 0x000a, 0x3a54: 0x000a, 
0x3a55: 0x000a, 0x3a56: 0x000a, 0x3a57: 0x000a, + 0x3a58: 0x000a, 0x3a59: 0x000a, 0x3a5a: 0x000a, 0x3a5b: 0x000a, 0x3a5c: 0x000a, 0x3a5d: 0x000a, + 0x3a5e: 0x000a, 0x3a5f: 0x000a, 0x3a60: 0x000a, 0x3a61: 0x000a, 0x3a62: 0x000a, + 0x3a65: 0x000a, 0x3a66: 0x000a, 0x3a67: 0x000a, 0x3a68: 0x000a, 0x3a69: 0x000a, + 0x3a6a: 0x000a, 0x3a6e: 0x000a, 0x3a6f: 0x000a, + 0x3a70: 0x000a, 0x3a71: 0x000a, 0x3a72: 0x000a, 0x3a73: 0x000a, 0x3a74: 0x000a, 0x3a75: 0x000a, + 0x3a76: 0x000a, 0x3a77: 0x000a, 0x3a78: 0x000a, 0x3a79: 0x000a, 0x3a7a: 0x000a, 0x3a7b: 0x000a, + 0x3a7c: 0x000a, 0x3a7d: 0x000a, 0x3a7e: 0x000a, 0x3a7f: 0x000a, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x000a, 0x3a81: 0x000a, 0x3a82: 0x000a, 0x3a83: 0x000a, 0x3a84: 0x000a, 0x3a85: 0x000a, + 0x3a86: 0x000a, 0x3a87: 0x000a, 0x3a88: 0x000a, 0x3a89: 0x000a, 0x3a8a: 0x000a, + 0x3a8d: 0x000a, 0x3a8e: 0x000a, 0x3a8f: 0x000a, 0x3a90: 0x000a, 0x3a91: 0x000a, + 0x3a92: 0x000a, 0x3a93: 0x000a, 0x3a94: 0x000a, 0x3a95: 0x000a, 0x3a96: 0x000a, 0x3a97: 0x000a, + 0x3a98: 0x000a, 0x3a99: 0x000a, 0x3a9a: 0x000a, 0x3a9b: 0x000a, 0x3a9c: 0x000a, 0x3a9d: 0x000a, + 0x3a9e: 0x000a, 0x3a9f: 0x000a, 0x3aa0: 0x000a, 0x3aa1: 0x000a, 0x3aa2: 0x000a, 0x3aa3: 0x000a, + 0x3aa4: 0x000a, 0x3aa5: 0x000a, 0x3aa6: 0x000a, 0x3aa7: 0x000a, 0x3aa8: 0x000a, 0x3aa9: 0x000a, + 0x3aaa: 0x000a, 0x3aab: 0x000a, 0x3aac: 0x000a, 0x3aad: 0x000a, 0x3aae: 0x000a, 0x3aaf: 0x000a, + 0x3ab0: 0x000a, 0x3ab1: 0x000a, 0x3ab2: 0x000a, 0x3ab3: 0x000a, 0x3ab4: 0x000a, 0x3ab5: 0x000a, + 0x3ab6: 0x000a, 0x3ab7: 0x000a, 0x3ab8: 0x000a, 0x3ab9: 0x000a, 0x3aba: 0x000a, 0x3abb: 0x000a, + 0x3abc: 0x000a, 0x3abd: 0x000a, 0x3abe: 0x000a, 0x3abf: 0x000a, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000a, 0x3ac1: 0x000a, 0x3ac2: 0x000a, 0x3ac3: 0x000a, 0x3ac4: 0x000a, 0x3ac5: 0x000a, + 0x3ac6: 0x000a, 0x3ac7: 0x000a, 0x3ac8: 0x000a, 0x3ac9: 0x000a, 0x3aca: 0x000a, 0x3acb: 0x000a, + 0x3acc: 0x000a, 0x3acd: 0x000a, 0x3ace: 0x000a, 0x3acf: 0x000a, 0x3ad0: 0x000a, 0x3ad1: 0x000a, + 0x3ad2: 0x000a, 0x3ad3: 0x000a, + 0x3ae0: 0x000a, 0x3ae1: 0x000a, 0x3ae2: 0x000a, 0x3ae3: 0x000a, + 0x3ae4: 0x000a, 0x3ae5: 0x000a, 0x3ae6: 0x000a, 0x3ae7: 0x000a, 0x3ae8: 0x000a, 0x3ae9: 0x000a, + 0x3aea: 0x000a, 0x3aeb: 0x000a, 0x3aec: 0x000a, 0x3aed: 0x000a, + 0x3af0: 0x000a, 0x3af1: 0x000a, 0x3af2: 0x000a, 0x3af3: 0x000a, + 0x3af8: 0x000a, 0x3af9: 0x000a, 0x3afa: 0x000a, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x000a, 0x3b01: 0x000a, 0x3b02: 0x000a, + 0x3b10: 0x000a, 0x3b11: 0x000a, + 0x3b12: 0x000a, 0x3b13: 0x000a, 0x3b14: 0x000a, 0x3b15: 0x000a, + // Block 0xed, offset 0x3b40 + 0x3b7e: 0x000b, 0x3b7f: 0x000b, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000b, 0x3b81: 0x000b, 0x3b82: 0x000b, 0x3b83: 0x000b, 0x3b84: 0x000b, 0x3b85: 0x000b, + 0x3b86: 0x000b, 0x3b87: 0x000b, 0x3b88: 0x000b, 0x3b89: 0x000b, 0x3b8a: 0x000b, 0x3b8b: 0x000b, + 0x3b8c: 0x000b, 0x3b8d: 0x000b, 0x3b8e: 0x000b, 0x3b8f: 0x000b, 0x3b90: 0x000b, 0x3b91: 0x000b, + 0x3b92: 0x000b, 0x3b93: 0x000b, 0x3b94: 0x000b, 0x3b95: 0x000b, 0x3b96: 0x000b, 0x3b97: 0x000b, + 0x3b98: 0x000b, 0x3b99: 0x000b, 0x3b9a: 0x000b, 0x3b9b: 0x000b, 0x3b9c: 0x000b, 0x3b9d: 0x000b, + 0x3b9e: 0x000b, 0x3b9f: 0x000b, 0x3ba0: 0x000b, 0x3ba1: 0x000b, 0x3ba2: 0x000b, 0x3ba3: 0x000b, + 0x3ba4: 0x000b, 0x3ba5: 0x000b, 0x3ba6: 0x000b, 0x3ba7: 0x000b, 0x3ba8: 0x000b, 0x3ba9: 0x000b, + 0x3baa: 0x000b, 0x3bab: 0x000b, 0x3bac: 0x000b, 0x3bad: 0x000b, 0x3bae: 0x000b, 0x3baf: 0x000b, + 0x3bb0: 0x000b, 0x3bb1: 0x000b, 0x3bb2: 0x000b, 0x3bb3: 0x000b, 0x3bb4: 0x000b, 0x3bb5: 0x000b, + 0x3bb6: 0x000b, 
0x3bb7: 0x000b, 0x3bb8: 0x000b, 0x3bb9: 0x000b, 0x3bba: 0x000b, 0x3bbb: 0x000b, + 0x3bbc: 0x000b, 0x3bbd: 0x000b, 0x3bbe: 0x000b, 0x3bbf: 0x000b, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000c, 0x3bc1: 0x000c, 0x3bc2: 0x000c, 0x3bc3: 0x000c, 0x3bc4: 0x000c, 0x3bc5: 0x000c, + 0x3bc6: 0x000c, 0x3bc7: 0x000c, 0x3bc8: 0x000c, 0x3bc9: 0x000c, 0x3bca: 0x000c, 0x3bcb: 0x000c, + 0x3bcc: 0x000c, 0x3bcd: 0x000c, 0x3bce: 0x000c, 0x3bcf: 0x000c, 0x3bd0: 0x000c, 0x3bd1: 0x000c, + 0x3bd2: 0x000c, 0x3bd3: 0x000c, 0x3bd4: 0x000c, 0x3bd5: 0x000c, 0x3bd6: 0x000c, 0x3bd7: 0x000c, + 0x3bd8: 0x000c, 0x3bd9: 0x000c, 0x3bda: 0x000c, 0x3bdb: 0x000c, 0x3bdc: 0x000c, 0x3bdd: 0x000c, + 0x3bde: 0x000c, 0x3bdf: 0x000c, 0x3be0: 0x000c, 0x3be1: 0x000c, 0x3be2: 0x000c, 0x3be3: 0x000c, + 0x3be4: 0x000c, 0x3be5: 0x000c, 0x3be6: 0x000c, 0x3be7: 0x000c, 0x3be8: 0x000c, 0x3be9: 0x000c, + 0x3bea: 0x000c, 0x3beb: 0x000c, 0x3bec: 0x000c, 0x3bed: 0x000c, 0x3bee: 0x000c, 0x3bef: 0x000c, + 0x3bf0: 0x000b, 0x3bf1: 0x000b, 0x3bf2: 0x000b, 0x3bf3: 0x000b, 0x3bf4: 0x000b, 0x3bf5: 0x000b, + 0x3bf6: 0x000b, 0x3bf7: 0x000b, 0x3bf8: 0x000b, 0x3bf9: 0x000b, 0x3bfa: 0x000b, 0x3bfb: 0x000b, + 0x3bfc: 0x000b, 0x3bfd: 0x000b, 0x3bfe: 0x000b, 0x3bff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. +var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x54, + 0x1b3: 0x64, 0x1b5: 0x65, 0x1b7: 0x66, + 0x1b8: 0x67, 0x1b9: 0x68, 0x1ba: 0x69, 0x1bb: 0x6a, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6b, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6c, 0x1c2: 0x6d, 0x1c3: 0x6e, 0x1c7: 0x6f, + 0x1c8: 0x70, 0x1c9: 
0x71, 0x1ca: 0x72, 0x1cb: 0x73, 0x1cd: 0x74, 0x1cf: 0x75, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x76, 0x253: 0x77, + 0x258: 0x78, 0x259: 0x79, 0x25a: 0x7a, 0x25b: 0x7b, 0x25c: 0x7c, 0x25e: 0x7d, + 0x260: 0x7e, 0x261: 0x7f, 0x263: 0x80, 0x264: 0x81, 0x265: 0x82, 0x266: 0x83, 0x267: 0x84, + 0x268: 0x85, 0x269: 0x86, 0x26a: 0x87, 0x26b: 0x88, 0x26f: 0x89, + // Block 0xa, offset 0x280 + 0x2ac: 0x8a, 0x2ad: 0x8b, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8c, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8d, + 0x2b8: 0x8e, 0x2b9: 0x8f, 0x2ba: 0x0e, 0x2bb: 0x90, 0x2bc: 0x91, 0x2bd: 0x92, 0x2bf: 0x93, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x94, 0x2c5: 0x54, 0x2c6: 0x95, 0x2c7: 0x96, + 0x2cb: 0x97, 0x2cd: 0x98, + 0x2e0: 0x99, 0x2e1: 0x99, 0x2e2: 0x99, 0x2e3: 0x99, 0x2e4: 0x9a, 0x2e5: 0x99, 0x2e6: 0x99, 0x2e7: 0x99, + 0x2e8: 0x9b, 0x2e9: 0x99, 0x2ea: 0x99, 0x2eb: 0x9c, 0x2ec: 0x9d, 0x2ed: 0x99, 0x2ee: 0x99, 0x2ef: 0x99, + 0x2f0: 0x99, 0x2f1: 0x99, 0x2f2: 0x99, 0x2f3: 0x99, 0x2f4: 0x9e, 0x2f5: 0x99, 0x2f6: 0x99, 0x2f7: 0x99, + 0x2f8: 0x99, 0x2f9: 0x9f, 0x2fa: 0x99, 0x2fb: 0x99, 0x2fc: 0xa0, 0x2fd: 0xa1, 0x2fe: 0x99, 0x2ff: 0x99, + // Block 0xc, offset 0x300 + 0x300: 0xa2, 0x301: 0xa3, 0x302: 0xa4, 0x304: 0xa5, 0x305: 0xa6, 0x306: 0xa7, 0x307: 0xa8, + 0x308: 0xa9, 0x30b: 0xaa, 0x30c: 0x26, 0x30d: 0xab, + 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, + 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, + 0x320: 0xb6, 0x327: 0xb7, + 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba, + 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf, + 0x33b: 0xc0, 0x33f: 0xc1, + // Block 0xd, offset 0x340 + 0x36b: 0xc2, 0x36c: 0xc3, + 0x37d: 0xc4, 0x37e: 0xc5, 0x37f: 0xc6, + // Block 0xe, offset 0x380 + 0x3b2: 0xc7, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc8, 0x3c6: 0xc9, + 0x3c8: 0x54, 0x3c9: 0xca, 0x3cc: 0x54, 0x3cd: 0xcb, + 0x3db: 0xcc, 0x3dc: 0xcd, 0x3dd: 0xce, 0x3de: 0xcf, 0x3df: 0xd0, + 0x3e8: 0xd1, 0x3e9: 0xd2, 0x3ea: 0xd3, + // Block 0x10, offset 0x400 + 0x400: 0xd4, 0x404: 0xc3, + 0x40b: 0xd5, + 0x420: 0x99, 0x421: 0x99, 0x422: 0x99, 0x423: 0xd6, 0x424: 0x99, 0x425: 0xd7, 0x426: 0x99, 0x427: 0x99, + 0x428: 0x99, 0x429: 0x99, 0x42a: 0x99, 0x42b: 0x99, 0x42c: 0x99, 0x42d: 0x99, 0x42e: 0x99, 0x42f: 0x99, + 0x430: 0x99, 0x431: 0xa0, 0x432: 0x0e, 0x433: 0x99, 0x434: 0x0e, 0x435: 0xd8, 0x436: 0x99, 0x437: 0x99, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd9, 0x43c: 0x99, 0x43d: 0x99, 0x43e: 0x99, 0x43f: 0x99, + // Block 0x11, offset 0x440 + 0x440: 0xda, 0x441: 0x54, 0x442: 0xdb, 0x443: 0xdc, 0x444: 0xdd, 0x445: 0xde, + 0x449: 0xdf, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xe0, 0x45c: 0x54, 0x45d: 0x6a, 0x45e: 0x54, 0x45f: 0xe1, + 0x460: 0xe2, 0x461: 0xe3, 0x462: 0xe4, 0x464: 0xe5, 0x465: 0xe6, 0x466: 0xe7, 0x467: 0xe8, + 0x468: 0x54, 0x469: 0xe9, 0x46a: 0xea, + 0x47f: 0xeb, + // Block 0x12, offset 0x480 + 0x4bf: 0xeb, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xec, 0x541: 0xec, 0x542: 0xec, 0x543: 0xec, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xed, + 0x548: 0xec, 0x549: 0xec, 0x54a: 0xec, 
0x54b: 0xec, 0x54c: 0xec, 0x54d: 0xec, 0x54e: 0xec, 0x54f: 0xec, + 0x550: 0xec, 0x551: 0xec, 0x552: 0xec, 0x553: 0xec, 0x554: 0xec, 0x555: 0xec, 0x556: 0xec, 0x557: 0xec, + 0x558: 0xec, 0x559: 0xec, 0x55a: 0xec, 0x55b: 0xec, 0x55c: 0xec, 0x55d: 0xec, 0x55e: 0xec, 0x55f: 0xec, + 0x560: 0xec, 0x561: 0xec, 0x562: 0xec, 0x563: 0xec, 0x564: 0xec, 0x565: 0xec, 0x566: 0xec, 0x567: 0xec, + 0x568: 0xec, 0x569: 0xec, 0x56a: 0xec, 0x56b: 0xec, 0x56c: 0xec, 0x56d: 0xec, 0x56e: 0xec, 0x56f: 0xec, + 0x570: 0xec, 0x571: 0xec, 0x572: 0xec, 0x573: 0xec, 0x574: 0xec, 0x575: 0xec, 0x576: 0xec, 0x577: 0xec, + 0x578: 0xec, 0x579: 0xec, 0x57a: 0xec, 0x57b: 0xec, 0x57c: 0xec, 0x57d: 0xec, 0x57e: 0xec, 0x57f: 0xec, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16952 bytes (16KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 7297cce3..2c58f09b 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13 +// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go new file mode 100644 index 00000000..10f5202c --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -0,0 +1,7710 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.14 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "12.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2CA1 + endMulti = 0x2F63 + firstLeadingCCC = 0x49B1 + firstCCCZeroExcept = 0x4A7B + firstStarterWithNLead = 0x4AA2 + lastDecomp = 0x4AA4 + maxDecomp = 0x8000 +) + +// decomps: 19108 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 
0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 
0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 
0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 
0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 
0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 
0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 
0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 
0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 
0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 
0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 
0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x52, 0x42, 0x4D, 0x56, 0x42, + 0x4D, 0x57, 0x42, 0x4E, 0x4A, 0x42, 0x4E, 0x6A, + 0x42, 0x4E, 0x6F, 0x42, 0x50, 0x48, 0x42, 0x50, + 0x52, 0x42, 0x50, 0x61, 0x42, 0x52, 0x73, 0x42, + 0x53, 0x44, 0x42, 0x53, 0x4D, 0x42, 0x53, 0x53, + 0x42, 0x53, 0x76, 0x42, 0x54, 0x4D, 0x42, 0x56, + 0x49, 0x42, 0x57, 0x43, 0x42, 0x57, 0x5A, 0x42, + 0x57, 0x62, 0x42, 0x58, 0x49, 0x42, 0x63, 0x63, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x64, 0x42, 0x63, 0x6D, 0x42, 0x64, + 0x42, 0x42, 0x64, 0x61, 0x42, 0x64, 0x6C, 0x42, + 0x64, 0x6D, 0x42, 0x64, 0x7A, 0x42, 0x65, 0x56, + 0x42, 0x66, 0x66, 0x42, 0x66, 0x69, 0x42, 0x66, + 0x6C, 0x42, 0x66, 0x6D, 0x42, 0x68, 0x61, 0x42, + 0x69, 0x69, 0x42, 0x69, 0x6A, 0x42, 0x69, 0x6E, + 0x42, 0x69, 0x76, 0x42, 0x69, 0x78, 0x42, 0x6B, + 0x41, 0x42, 0x6B, 0x56, 0x42, 0x6B, 0x57, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x67, 0x42, 0x6B, 0x6C, 0x42, 0x6B, 0x6D, + 0x42, 0x6B, 0x74, 0x42, 0x6C, 0x6A, 0x42, 0x6C, + 0x6D, 0x42, 0x6C, 0x6E, 0x42, 0x6C, 0x78, 0x42, + 0x6D, 0x32, 0x42, 0x6D, 0x33, 0x42, 0x6D, 0x41, + 0x42, 0x6D, 0x56, 0x42, 0x6D, 0x57, 0x42, 0x6D, + 0x62, 0x42, 0x6D, 0x67, 0x42, 0x6D, 0x6C, 0x42, + 0x6D, 0x6D, 0x42, 0x6D, 0x73, 0x42, 0x6E, 0x41, + 0x42, 0x6E, 0x46, 0x42, 0x6E, 0x56, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x57, 0x42, 0x6E, 0x6A, 0x42, 0x6E, 0x6D, 0x42, + 0x6E, 0x73, 0x42, 0x6F, 0x56, 0x42, 0x70, 0x41, + 0x42, 0x70, 0x46, 0x42, 0x70, 0x56, 0x42, 0x70, + 0x57, 0x42, 0x70, 0x63, 0x42, 0x70, 0x73, 0x42, + 0x73, 0x72, 0x42, 0x73, 0x74, 0x42, 0x76, 0x69, + 0x42, 0x78, 0x69, 0x43, 0x28, 0x31, 0x29, 0x43, + 0x28, 0x32, 0x29, 0x43, 0x28, 0x33, 0x29, 0x43, + 0x28, 0x34, 0x29, 0x43, 0x28, 0x35, 0x29, 0x43, + // Bytes 1a80 - 1abf + 0x28, 0x36, 0x29, 0x43, 0x28, 0x37, 0x29, 0x43, + 0x28, 0x38, 0x29, 0x43, 0x28, 0x39, 0x29, 0x43, + 0x28, 0x41, 0x29, 0x43, 0x28, 0x42, 0x29, 0x43, + 0x28, 0x43, 0x29, 0x43, 0x28, 0x44, 0x29, 0x43, + 0x28, 0x45, 0x29, 0x43, 0x28, 0x46, 0x29, 0x43, + 0x28, 0x47, 0x29, 0x43, 0x28, 0x48, 0x29, 0x43, + 0x28, 0x49, 0x29, 0x43, 0x28, 0x4A, 0x29, 0x43, + 0x28, 0x4B, 0x29, 0x43, 0x28, 0x4C, 0x29, 0x43, + // Bytes 1ac0 - 1aff + 0x28, 0x4D, 0x29, 0x43, 0x28, 0x4E, 0x29, 0x43, + 0x28, 0x4F, 0x29, 0x43, 0x28, 0x50, 0x29, 0x43, + 0x28, 0x51, 0x29, 0x43, 0x28, 0x52, 0x29, 0x43, + 0x28, 0x53, 0x29, 0x43, 0x28, 0x54, 0x29, 0x43, + 0x28, 0x55, 0x29, 0x43, 0x28, 0x56, 0x29, 0x43, + 0x28, 0x57, 0x29, 0x43, 0x28, 0x58, 0x29, 0x43, + 0x28, 0x59, 0x29, 0x43, 0x28, 0x5A, 0x29, 0x43, + 0x28, 0x61, 0x29, 0x43, 0x28, 0x62, 0x29, 0x43, + // Bytes 1b00 - 1b3f + 0x28, 0x63, 
0x29, 0x43, 0x28, 0x64, 0x29, 0x43, + 0x28, 0x65, 0x29, 0x43, 0x28, 0x66, 0x29, 0x43, + 0x28, 0x67, 0x29, 0x43, 0x28, 0x68, 0x29, 0x43, + 0x28, 0x69, 0x29, 0x43, 0x28, 0x6A, 0x29, 0x43, + 0x28, 0x6B, 0x29, 0x43, 0x28, 0x6C, 0x29, 0x43, + 0x28, 0x6D, 0x29, 0x43, 0x28, 0x6E, 0x29, 0x43, + 0x28, 0x6F, 0x29, 0x43, 0x28, 0x70, 0x29, 0x43, + 0x28, 0x71, 0x29, 0x43, 0x28, 0x72, 0x29, 0x43, + // Bytes 1b40 - 1b7f + 0x28, 0x73, 0x29, 0x43, 0x28, 0x74, 0x29, 0x43, + 0x28, 0x75, 0x29, 0x43, 0x28, 0x76, 0x29, 0x43, + 0x28, 0x77, 0x29, 0x43, 0x28, 0x78, 0x29, 0x43, + 0x28, 0x79, 0x29, 0x43, 0x28, 0x7A, 0x29, 0x43, + 0x2E, 0x2E, 0x2E, 0x43, 0x31, 0x30, 0x2E, 0x43, + 0x31, 0x31, 0x2E, 0x43, 0x31, 0x32, 0x2E, 0x43, + 0x31, 0x33, 0x2E, 0x43, 0x31, 0x34, 0x2E, 0x43, + 0x31, 0x35, 0x2E, 0x43, 0x31, 0x36, 0x2E, 0x43, + // Bytes 1b80 - 1bbf + 0x31, 0x37, 0x2E, 0x43, 0x31, 0x38, 0x2E, 0x43, + 0x31, 0x39, 0x2E, 0x43, 0x32, 0x30, 0x2E, 0x43, + 0x3A, 0x3A, 0x3D, 0x43, 0x3D, 0x3D, 0x3D, 0x43, + 0x43, 0x6F, 0x2E, 0x43, 0x46, 0x41, 0x58, 0x43, + 0x47, 0x48, 0x7A, 0x43, 0x47, 0x50, 0x61, 0x43, + 0x49, 0x49, 0x49, 0x43, 0x4C, 0x54, 0x44, 0x43, + 0x4C, 0xC2, 0xB7, 0x43, 0x4D, 0x48, 0x7A, 0x43, + 0x4D, 0x50, 0x61, 0x43, 0x4D, 0xCE, 0xA9, 0x43, + // Bytes 1bc0 - 1bff + 0x50, 0x50, 0x4D, 0x43, 0x50, 0x50, 0x56, 0x43, + 0x50, 0x54, 0x45, 0x43, 0x54, 0x45, 0x4C, 0x43, + 0x54, 0x48, 0x7A, 0x43, 0x56, 0x49, 0x49, 0x43, + 0x58, 0x49, 0x49, 0x43, 0x61, 0x2F, 0x63, 0x43, + 0x61, 0x2F, 0x73, 0x43, 0x61, 0xCA, 0xBE, 0x43, + 0x62, 0x61, 0x72, 0x43, 0x63, 0x2F, 0x6F, 0x43, + 0x63, 0x2F, 0x75, 0x43, 0x63, 0x61, 0x6C, 0x43, + 0x63, 0x6D, 0x32, 0x43, 0x63, 0x6D, 0x33, 0x43, + // Bytes 1c00 - 1c3f + 0x64, 0x6D, 0x32, 0x43, 0x64, 0x6D, 0x33, 0x43, + 0x65, 0x72, 0x67, 0x43, 0x66, 0x66, 0x69, 0x43, + 0x66, 0x66, 0x6C, 0x43, 0x67, 0x61, 0x6C, 0x43, + 0x68, 0x50, 0x61, 0x43, 0x69, 0x69, 0x69, 0x43, + 0x6B, 0x48, 0x7A, 0x43, 0x6B, 0x50, 0x61, 0x43, + 0x6B, 0x6D, 0x32, 0x43, 0x6B, 0x6D, 0x33, 0x43, + 0x6B, 0xCE, 0xA9, 0x43, 0x6C, 0x6F, 0x67, 0x43, + 0x6C, 0xC2, 0xB7, 0x43, 0x6D, 0x69, 0x6C, 0x43, + // Bytes 1c40 - 1c7f + 0x6D, 0x6D, 0x32, 0x43, 0x6D, 0x6D, 0x33, 0x43, + 0x6D, 0x6F, 0x6C, 0x43, 0x72, 0x61, 0x64, 0x43, + 0x76, 0x69, 0x69, 0x43, 0x78, 0x69, 0x69, 0x43, + 0xC2, 0xB0, 0x43, 0x43, 0xC2, 0xB0, 0x46, 0x43, + 0xCA, 0xBC, 0x6E, 0x43, 0xCE, 0xBC, 0x41, 0x43, + 0xCE, 0xBC, 0x46, 0x43, 0xCE, 0xBC, 0x56, 0x43, + 0xCE, 0xBC, 0x57, 0x43, 0xCE, 0xBC, 0x67, 0x43, + 0xCE, 0xBC, 0x6C, 0x43, 0xCE, 0xBC, 0x6D, 0x43, + // Bytes 1c80 - 1cbf + 0xCE, 0xBC, 0x73, 0x44, 0x28, 0x31, 0x30, 0x29, + 0x44, 0x28, 0x31, 0x31, 0x29, 0x44, 0x28, 0x31, + 0x32, 0x29, 0x44, 0x28, 0x31, 0x33, 0x29, 0x44, + 0x28, 0x31, 0x34, 0x29, 0x44, 0x28, 0x31, 0x35, + 0x29, 0x44, 0x28, 0x31, 0x36, 0x29, 0x44, 0x28, + 0x31, 0x37, 0x29, 0x44, 0x28, 0x31, 0x38, 0x29, + 0x44, 0x28, 0x31, 0x39, 0x29, 0x44, 0x28, 0x32, + 0x30, 0x29, 0x44, 0x30, 0xE7, 0x82, 0xB9, 0x44, + // Bytes 1cc0 - 1cff + 0x31, 0xE2, 0x81, 0x84, 0x44, 0x31, 0xE6, 0x97, + 0xA5, 0x44, 0x31, 0xE6, 0x9C, 0x88, 0x44, 0x31, + 0xE7, 0x82, 0xB9, 0x44, 0x32, 0xE6, 0x97, 0xA5, + 0x44, 0x32, 0xE6, 0x9C, 0x88, 0x44, 0x32, 0xE7, + 0x82, 0xB9, 0x44, 0x33, 0xE6, 0x97, 0xA5, 0x44, + 0x33, 0xE6, 0x9C, 0x88, 0x44, 0x33, 0xE7, 0x82, + 0xB9, 0x44, 0x34, 0xE6, 0x97, 0xA5, 0x44, 0x34, + 0xE6, 0x9C, 0x88, 0x44, 0x34, 0xE7, 0x82, 0xB9, + // Bytes 1d00 - 1d3f + 0x44, 0x35, 0xE6, 0x97, 0xA5, 0x44, 0x35, 0xE6, + 0x9C, 0x88, 0x44, 0x35, 0xE7, 0x82, 0xB9, 0x44, + 0x36, 0xE6, 0x97, 0xA5, 0x44, 0x36, 0xE6, 0x9C, + 0x88, 0x44, 0x36, 0xE7, 0x82, 
0xB9, 0x44, 0x37, + 0xE6, 0x97, 0xA5, 0x44, 0x37, 0xE6, 0x9C, 0x88, + 0x44, 0x37, 0xE7, 0x82, 0xB9, 0x44, 0x38, 0xE6, + 0x97, 0xA5, 0x44, 0x38, 0xE6, 0x9C, 0x88, 0x44, + 0x38, 0xE7, 0x82, 0xB9, 0x44, 0x39, 0xE6, 0x97, + // Bytes 1d40 - 1d7f + 0xA5, 0x44, 0x39, 0xE6, 0x9C, 0x88, 0x44, 0x39, + 0xE7, 0x82, 0xB9, 0x44, 0x56, 0x49, 0x49, 0x49, + 0x44, 0x61, 0x2E, 0x6D, 0x2E, 0x44, 0x6B, 0x63, + 0x61, 0x6C, 0x44, 0x70, 0x2E, 0x6D, 0x2E, 0x44, + 0x76, 0x69, 0x69, 0x69, 0x44, 0xD5, 0xA5, 0xD6, + 0x82, 0x44, 0xD5, 0xB4, 0xD5, 0xA5, 0x44, 0xD5, + 0xB4, 0xD5, 0xAB, 0x44, 0xD5, 0xB4, 0xD5, 0xAD, + 0x44, 0xD5, 0xB4, 0xD5, 0xB6, 0x44, 0xD5, 0xBE, + // Bytes 1d80 - 1dbf + 0xD5, 0xB6, 0x44, 0xD7, 0x90, 0xD7, 0x9C, 0x44, + 0xD8, 0xA7, 0xD9, 0xB4, 0x44, 0xD8, 0xA8, 0xD8, + 0xAC, 0x44, 0xD8, 0xA8, 0xD8, 0xAD, 0x44, 0xD8, + 0xA8, 0xD8, 0xAE, 0x44, 0xD8, 0xA8, 0xD8, 0xB1, + 0x44, 0xD8, 0xA8, 0xD8, 0xB2, 0x44, 0xD8, 0xA8, + 0xD9, 0x85, 0x44, 0xD8, 0xA8, 0xD9, 0x86, 0x44, + 0xD8, 0xA8, 0xD9, 0x87, 0x44, 0xD8, 0xA8, 0xD9, + 0x89, 0x44, 0xD8, 0xA8, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1dc0 - 1dff + 0xAA, 0xD8, 0xAC, 0x44, 0xD8, 0xAA, 0xD8, 0xAD, + 0x44, 0xD8, 0xAA, 0xD8, 0xAE, 0x44, 0xD8, 0xAA, + 0xD8, 0xB1, 0x44, 0xD8, 0xAA, 0xD8, 0xB2, 0x44, + 0xD8, 0xAA, 0xD9, 0x85, 0x44, 0xD8, 0xAA, 0xD9, + 0x86, 0x44, 0xD8, 0xAA, 0xD9, 0x87, 0x44, 0xD8, + 0xAA, 0xD9, 0x89, 0x44, 0xD8, 0xAA, 0xD9, 0x8A, + 0x44, 0xD8, 0xAB, 0xD8, 0xAC, 0x44, 0xD8, 0xAB, + 0xD8, 0xB1, 0x44, 0xD8, 0xAB, 0xD8, 0xB2, 0x44, + // Bytes 1e00 - 1e3f + 0xD8, 0xAB, 0xD9, 0x85, 0x44, 0xD8, 0xAB, 0xD9, + 0x86, 0x44, 0xD8, 0xAB, 0xD9, 0x87, 0x44, 0xD8, + 0xAB, 0xD9, 0x89, 0x44, 0xD8, 0xAB, 0xD9, 0x8A, + 0x44, 0xD8, 0xAC, 0xD8, 0xAD, 0x44, 0xD8, 0xAC, + 0xD9, 0x85, 0x44, 0xD8, 0xAC, 0xD9, 0x89, 0x44, + 0xD8, 0xAC, 0xD9, 0x8A, 0x44, 0xD8, 0xAD, 0xD8, + 0xAC, 0x44, 0xD8, 0xAD, 0xD9, 0x85, 0x44, 0xD8, + 0xAD, 0xD9, 0x89, 0x44, 0xD8, 0xAD, 0xD9, 0x8A, + // Bytes 1e40 - 1e7f + 0x44, 0xD8, 0xAE, 0xD8, 0xAC, 0x44, 0xD8, 0xAE, + 0xD8, 0xAD, 0x44, 0xD8, 0xAE, 0xD9, 0x85, 0x44, + 0xD8, 0xAE, 0xD9, 0x89, 0x44, 0xD8, 0xAE, 0xD9, + 0x8A, 0x44, 0xD8, 0xB3, 0xD8, 0xAC, 0x44, 0xD8, + 0xB3, 0xD8, 0xAD, 0x44, 0xD8, 0xB3, 0xD8, 0xAE, + 0x44, 0xD8, 0xB3, 0xD8, 0xB1, 0x44, 0xD8, 0xB3, + 0xD9, 0x85, 0x44, 0xD8, 0xB3, 0xD9, 0x87, 0x44, + 0xD8, 0xB3, 0xD9, 0x89, 0x44, 0xD8, 0xB3, 0xD9, + // Bytes 1e80 - 1ebf + 0x8A, 0x44, 0xD8, 0xB4, 0xD8, 0xAC, 0x44, 0xD8, + 0xB4, 0xD8, 0xAD, 0x44, 0xD8, 0xB4, 0xD8, 0xAE, + 0x44, 0xD8, 0xB4, 0xD8, 0xB1, 0x44, 0xD8, 0xB4, + 0xD9, 0x85, 0x44, 0xD8, 0xB4, 0xD9, 0x87, 0x44, + 0xD8, 0xB4, 0xD9, 0x89, 0x44, 0xD8, 0xB4, 0xD9, + 0x8A, 0x44, 0xD8, 0xB5, 0xD8, 0xAD, 0x44, 0xD8, + 0xB5, 0xD8, 0xAE, 0x44, 0xD8, 0xB5, 0xD8, 0xB1, + 0x44, 0xD8, 0xB5, 0xD9, 0x85, 0x44, 0xD8, 0xB5, + // Bytes 1ec0 - 1eff + 0xD9, 0x89, 0x44, 0xD8, 0xB5, 0xD9, 0x8A, 0x44, + 0xD8, 0xB6, 0xD8, 0xAC, 0x44, 0xD8, 0xB6, 0xD8, + 0xAD, 0x44, 0xD8, 0xB6, 0xD8, 0xAE, 0x44, 0xD8, + 0xB6, 0xD8, 0xB1, 0x44, 0xD8, 0xB6, 0xD9, 0x85, + 0x44, 0xD8, 0xB6, 0xD9, 0x89, 0x44, 0xD8, 0xB6, + 0xD9, 0x8A, 0x44, 0xD8, 0xB7, 0xD8, 0xAD, 0x44, + 0xD8, 0xB7, 0xD9, 0x85, 0x44, 0xD8, 0xB7, 0xD9, + 0x89, 0x44, 0xD8, 0xB7, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1f00 - 1f3f + 0xB8, 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD8, 0xAC, + 0x44, 0xD8, 0xB9, 0xD9, 0x85, 0x44, 0xD8, 0xB9, + 0xD9, 0x89, 0x44, 0xD8, 0xB9, 0xD9, 0x8A, 0x44, + 0xD8, 0xBA, 0xD8, 0xAC, 0x44, 0xD8, 0xBA, 0xD9, + 0x85, 0x44, 0xD8, 0xBA, 0xD9, 0x89, 0x44, 0xD8, + 0xBA, 0xD9, 0x8A, 0x44, 0xD9, 0x81, 0xD8, 0xAC, + 0x44, 0xD9, 0x81, 0xD8, 0xAD, 0x44, 0xD9, 0x81, + 
0xD8, 0xAE, 0x44, 0xD9, 0x81, 0xD9, 0x85, 0x44, + // Bytes 1f40 - 1f7f + 0xD9, 0x81, 0xD9, 0x89, 0x44, 0xD9, 0x81, 0xD9, + 0x8A, 0x44, 0xD9, 0x82, 0xD8, 0xAD, 0x44, 0xD9, + 0x82, 0xD9, 0x85, 0x44, 0xD9, 0x82, 0xD9, 0x89, + 0x44, 0xD9, 0x82, 0xD9, 0x8A, 0x44, 0xD9, 0x83, + 0xD8, 0xA7, 0x44, 0xD9, 0x83, 0xD8, 0xAC, 0x44, + 0xD9, 0x83, 0xD8, 0xAD, 0x44, 0xD9, 0x83, 0xD8, + 0xAE, 0x44, 0xD9, 0x83, 0xD9, 0x84, 0x44, 0xD9, + 0x83, 0xD9, 0x85, 0x44, 0xD9, 0x83, 0xD9, 0x89, + // Bytes 1f80 - 1fbf + 0x44, 0xD9, 0x83, 0xD9, 0x8A, 0x44, 0xD9, 0x84, + 0xD8, 0xA7, 0x44, 0xD9, 0x84, 0xD8, 0xAC, 0x44, + 0xD9, 0x84, 0xD8, 0xAD, 0x44, 0xD9, 0x84, 0xD8, + 0xAE, 0x44, 0xD9, 0x84, 0xD9, 0x85, 0x44, 0xD9, + 0x84, 0xD9, 0x87, 0x44, 0xD9, 0x84, 0xD9, 0x89, + 0x44, 0xD9, 0x84, 0xD9, 0x8A, 0x44, 0xD9, 0x85, + 0xD8, 0xA7, 0x44, 0xD9, 0x85, 0xD8, 0xAC, 0x44, + 0xD9, 0x85, 0xD8, 0xAD, 0x44, 0xD9, 0x85, 0xD8, + // Bytes 1fc0 - 1fff + 0xAE, 0x44, 0xD9, 0x85, 0xD9, 0x85, 0x44, 0xD9, + 0x85, 0xD9, 0x89, 0x44, 0xD9, 0x85, 0xD9, 0x8A, + 0x44, 0xD9, 0x86, 0xD8, 0xAC, 0x44, 0xD9, 0x86, + 0xD8, 0xAD, 0x44, 0xD9, 0x86, 0xD8, 0xAE, 0x44, + 0xD9, 0x86, 0xD8, 0xB1, 0x44, 0xD9, 0x86, 0xD8, + 0xB2, 0x44, 0xD9, 0x86, 0xD9, 0x85, 0x44, 0xD9, + 0x86, 0xD9, 0x86, 0x44, 0xD9, 0x86, 0xD9, 0x87, + 0x44, 0xD9, 0x86, 0xD9, 0x89, 0x44, 0xD9, 0x86, + // Bytes 2000 - 203f + 0xD9, 0x8A, 0x44, 0xD9, 0x87, 0xD8, 0xAC, 0x44, + 0xD9, 0x87, 0xD9, 0x85, 0x44, 0xD9, 0x87, 0xD9, + 0x89, 0x44, 0xD9, 0x87, 0xD9, 0x8A, 0x44, 0xD9, + 0x88, 0xD9, 0xB4, 0x44, 0xD9, 0x8A, 0xD8, 0xAC, + 0x44, 0xD9, 0x8A, 0xD8, 0xAD, 0x44, 0xD9, 0x8A, + 0xD8, 0xAE, 0x44, 0xD9, 0x8A, 0xD8, 0xB1, 0x44, + 0xD9, 0x8A, 0xD8, 0xB2, 0x44, 0xD9, 0x8A, 0xD9, + 0x85, 0x44, 0xD9, 0x8A, 0xD9, 0x86, 0x44, 0xD9, + // Bytes 2040 - 207f + 0x8A, 0xD9, 0x87, 0x44, 0xD9, 0x8A, 0xD9, 0x89, + 0x44, 0xD9, 0x8A, 0xD9, 0x8A, 0x44, 0xD9, 0x8A, + 0xD9, 0xB4, 0x44, 0xDB, 0x87, 0xD9, 0xB4, 0x45, + 0x28, 0xE1, 0x84, 0x80, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x82, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x83, + 0x29, 0x45, 0x28, 0xE1, 0x84, 0x85, 0x29, 0x45, + 0x28, 0xE1, 0x84, 0x86, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x87, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x89, + // Bytes 2080 - 20bf + 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8B, 0x29, 0x45, + 0x28, 0xE1, 0x84, 0x8C, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x8E, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8F, + 0x29, 0x45, 0x28, 0xE1, 0x84, 0x90, 0x29, 0x45, + 0x28, 0xE1, 0x84, 0x91, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x92, 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x80, + 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x83, 0x29, 0x45, + 0x28, 0xE4, 0xB8, 0x89, 0x29, 0x45, 0x28, 0xE4, + // Bytes 20c0 - 20ff + 0xB9, 0x9D, 0x29, 0x45, 0x28, 0xE4, 0xBA, 0x8C, + 0x29, 0x45, 0x28, 0xE4, 0xBA, 0x94, 0x29, 0x45, + 0x28, 0xE4, 0xBB, 0xA3, 0x29, 0x45, 0x28, 0xE4, + 0xBC, 0x81, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x91, + 0x29, 0x45, 0x28, 0xE5, 0x85, 0xAB, 0x29, 0x45, + 0x28, 0xE5, 0x85, 0xAD, 0x29, 0x45, 0x28, 0xE5, + 0x8A, 0xB4, 0x29, 0x45, 0x28, 0xE5, 0x8D, 0x81, + 0x29, 0x45, 0x28, 0xE5, 0x8D, 0x94, 0x29, 0x45, + // Bytes 2100 - 213f + 0x28, 0xE5, 0x90, 0x8D, 0x29, 0x45, 0x28, 0xE5, + 0x91, 0xBC, 0x29, 0x45, 0x28, 0xE5, 0x9B, 0x9B, + 0x29, 0x45, 0x28, 0xE5, 0x9C, 0x9F, 0x29, 0x45, + 0x28, 0xE5, 0xAD, 0xA6, 0x29, 0x45, 0x28, 0xE6, + 0x97, 0xA5, 0x29, 0x45, 0x28, 0xE6, 0x9C, 0x88, + 0x29, 0x45, 0x28, 0xE6, 0x9C, 0x89, 0x29, 0x45, + 0x28, 0xE6, 0x9C, 0xA8, 0x29, 0x45, 0x28, 0xE6, + 0xA0, 0xAA, 0x29, 0x45, 0x28, 0xE6, 0xB0, 0xB4, + // Bytes 2140 - 217f + 0x29, 0x45, 0x28, 0xE7, 0x81, 0xAB, 0x29, 0x45, + 0x28, 0xE7, 0x89, 0xB9, 0x29, 0x45, 0x28, 0xE7, 
+ 0x9B, 0xA3, 0x29, 0x45, 0x28, 0xE7, 0xA4, 0xBE, + 0x29, 0x45, 0x28, 0xE7, 0xA5, 0x9D, 0x29, 0x45, + 0x28, 0xE7, 0xA5, 0xAD, 0x29, 0x45, 0x28, 0xE8, + 0x87, 0xAA, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xB3, + 0x29, 0x45, 0x28, 0xE8, 0xB2, 0xA1, 0x29, 0x45, + 0x28, 0xE8, 0xB3, 0x87, 0x29, 0x45, 0x28, 0xE9, + // Bytes 2180 - 21bf + 0x87, 0x91, 0x29, 0x45, 0x30, 0xE2, 0x81, 0x84, + 0x33, 0x45, 0x31, 0x30, 0xE6, 0x97, 0xA5, 0x45, + 0x31, 0x30, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x30, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x31, 0xE6, 0x97, + 0xA5, 0x45, 0x31, 0x31, 0xE6, 0x9C, 0x88, 0x45, + 0x31, 0x31, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x32, + 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x32, 0xE6, 0x9C, + 0x88, 0x45, 0x31, 0x32, 0xE7, 0x82, 0xB9, 0x45, + // Bytes 21c0 - 21ff + 0x31, 0x33, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x33, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x34, 0xE6, 0x97, + 0xA5, 0x45, 0x31, 0x34, 0xE7, 0x82, 0xB9, 0x45, + 0x31, 0x35, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x35, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x36, 0xE6, 0x97, + 0xA5, 0x45, 0x31, 0x36, 0xE7, 0x82, 0xB9, 0x45, + 0x31, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x37, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x38, 0xE6, 0x97, + // Bytes 2200 - 223f + 0xA5, 0x45, 0x31, 0x38, 0xE7, 0x82, 0xB9, 0x45, + 0x31, 0x39, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x39, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0xE2, 0x81, 0x84, + 0x32, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x33, 0x45, + 0x31, 0xE2, 0x81, 0x84, 0x34, 0x45, 0x31, 0xE2, + 0x81, 0x84, 0x35, 0x45, 0x31, 0xE2, 0x81, 0x84, + 0x36, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x37, 0x45, + 0x31, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x31, 0xE2, + // Bytes 2240 - 227f + 0x81, 0x84, 0x39, 0x45, 0x32, 0x30, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x30, 0xE7, 0x82, 0xB9, 0x45, + 0x32, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x31, + 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x32, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x32, 0xE7, 0x82, 0xB9, 0x45, + 0x32, 0x33, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x33, + 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x34, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x34, 0xE7, 0x82, 0xB9, 0x45, + // Bytes 2280 - 22bf + 0x32, 0x35, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x36, + 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x37, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x38, 0xE6, 0x97, 0xA5, 0x45, + 0x32, 0x39, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0xE2, + 0x81, 0x84, 0x33, 0x45, 0x32, 0xE2, 0x81, 0x84, + 0x35, 0x45, 0x33, 0x30, 0xE6, 0x97, 0xA5, 0x45, + 0x33, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0xE2, + 0x81, 0x84, 0x34, 0x45, 0x33, 0xE2, 0x81, 0x84, + // Bytes 22c0 - 22ff + 0x35, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x38, 0x45, + 0x34, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x35, 0xE2, + 0x81, 0x84, 0x36, 0x45, 0x35, 0xE2, 0x81, 0x84, + 0x38, 0x45, 0x37, 0xE2, 0x81, 0x84, 0x38, 0x45, + 0x41, 0xE2, 0x88, 0x95, 0x6D, 0x45, 0x56, 0xE2, + 0x88, 0x95, 0x6D, 0x45, 0x6D, 0xE2, 0x88, 0x95, + 0x73, 0x46, 0x31, 0xE2, 0x81, 0x84, 0x31, 0x30, + 0x46, 0x43, 0xE2, 0x88, 0x95, 0x6B, 0x67, 0x46, + // Bytes 2300 - 233f + 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x46, 0xD8, + 0xA8, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xA8, + 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD8, + // Bytes 2340 - 237f + 0xAA, 0xD8, 0xAE, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD9, + 0x85, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, 0xD9, 0x85, + 0xD8, 0xAD, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, + 0xAE, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAA, 
0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD8, + 0xAC, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, + // Bytes 2380 - 23bf + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xAC, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xAD, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAD, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD8, + 0xB3, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD8, 0xB3, + 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xB3, 0xD8, + // Bytes 23c0 - 23ff + 0xAE, 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAD, + 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xB4, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xB4, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xB4, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD9, + 0x85, 0xD8, 0xAE, 0x46, 0xD8, 0xB4, 0xD9, 0x85, + // Bytes 2400 - 243f + 0xD9, 0x85, 0x46, 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, + 0xAD, 0x46, 0xD8, 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, 0x46, + 0xD8, 0xB5, 0xD9, 0x84, 0xDB, 0x92, 0x46, 0xD8, + 0xB5, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB6, + 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xB6, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB6, 0xD8, 0xAE, + 0xD9, 0x85, 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD8, + // Bytes 2440 - 247f + 0xAD, 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xB9, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD8, + 0xB9, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB9, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xB9, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xBA, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2480 - 24bf + 0x46, 0xD9, 0x81, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD9, 0x81, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, + 0x82, 0xD9, 0x84, 0xDB, 0x92, 0x46, 0xD9, 0x82, + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x82, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x82, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x83, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD8, 0xAC, 0x46, + // Bytes 24c0 - 24ff + 0xD9, 0x84, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x84, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAE, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD9, 0x85, 0xD8, 0xAD, + 0x46, 0xD9, 0x84, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x85, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD9, + // Bytes 2500 - 253f + 0x85, 0xD8, 0xAC, 0xD8, 0xAE, 0x46, 0xD9, 0x85, + 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAD, + 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + // Bytes 2540 - 257f + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x86, 0xD8, 0xAC, + 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD9, + // Bytes 2580 - 25bf + 0x85, 0xD9, 
0x8A, 0x46, 0xD9, 0x87, 0xD9, 0x85, + 0xD8, 0xAC, 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x8A, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x8A, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xA7, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD8, 0xAC, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + // Bytes 25c0 - 25ff + 0xD8, 0xAD, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xAE, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xB1, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xB2, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD9, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD9, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x89, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x86, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x87, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x88, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xDB, 0x90, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x95, 0x46, 0xE0, 0xB9, 0x8D, + 0xE0, 0xB8, 0xB2, 0x46, 0xE0, 0xBA, 0xAB, 0xE0, + 0xBA, 0x99, 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, + 0xA1, 0x46, 0xE0, 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, + // Bytes 2640 - 267f + 0x46, 0xE0, 0xBD, 0x80, 0xE0, 0xBE, 0xB5, 0x46, + 0xE0, 0xBD, 0x82, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBD, 0x8C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, + 0x91, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x96, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x9B, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0x90, 0xE0, 0xBE, + 0xB5, 0x46, 0xE0, 0xBE, 0x92, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBE, 0x9C, 0xE0, 0xBE, 0xB7, 0x46, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xA1, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBE, 0xA6, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0xAB, 0xE0, 0xBE, 0xB7, 0x46, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x46, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x46, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0x46, 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, + 0x46, 0xE3, 0x81, 0xBB, 0xE3, 0x81, 0x8B, 0x46, + 0xE3, 0x82, 0x88, 0xE3, 0x82, 0x8A, 0x46, 0xE3, + // Bytes 26c0 - 26ff + 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0x46, 0xE3, 0x82, + 0xB3, 0xE3, 0x82, 0xB3, 0x46, 0xE3, 0x82, 0xB3, + 0xE3, 0x83, 0x88, 0x46, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x46, 0xE3, 0x83, 0x8A, 0xE3, 0x83, + 0x8E, 0x46, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xB3, + 0x46, 0xE3, 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0x46, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xA9, 0x46, 0xE3, + 0x83, 0xAC, 0xE3, 0x83, 0xA0, 0x46, 0xE5, 0xA4, + // Bytes 2700 - 273f + 0xA7, 0xE6, 0xAD, 0xA3, 0x46, 0xE5, 0xB9, 0xB3, + 0xE6, 0x88, 0x90, 0x46, 0xE6, 0x98, 0x8E, 0xE6, + 0xB2, 0xBB, 0x46, 0xE6, 0x98, 0xAD, 0xE5, 0x92, + 0x8C, 0x47, 0x72, 0x61, 0x64, 0xE2, 0x88, 0x95, + 0x73, 0x47, 0xE3, 0x80, 0x94, 0x53, 0xE3, 0x80, + 0x95, 0x48, 0x28, 0xE1, 0x84, 0x80, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x82, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x83, + // Bytes 2740 - 277f + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x85, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, 0xE1, + // Bytes 2780 - 27bf + 0x85, 0xAE, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8E, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8F, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x90, 0xE1, 0x85, 0xA1, 
0x29, 0x48, 0x28, + 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x72, 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, + 0x32, 0x48, 0xD8, 0xA7, 0xD9, 0x83, 0xD8, 0xA8, + // Bytes 27c0 - 27ff + 0xD8, 0xB1, 0x48, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + 0x84, 0xD9, 0x87, 0x48, 0xD8, 0xB1, 0xD8, 0xB3, + 0xD9, 0x88, 0xD9, 0x84, 0x48, 0xD8, 0xB1, 0xDB, + 0x8C, 0xD8, 0xA7, 0xD9, 0x84, 0x48, 0xD8, 0xB5, + 0xD9, 0x84, 0xD8, 0xB9, 0xD9, 0x85, 0x48, 0xD8, + 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x48, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0xD8, 0xAF, + 0x48, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, 0x84, 0xD9, + // Bytes 2800 - 283f + 0x85, 0x49, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x49, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x49, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x49, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0xE2, 0x88, + 0xAE, 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xB8, 0x89, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE4, + 0xBA, 0x8C, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + // Bytes 2840 - 287f + 0x94, 0xE5, 0x8B, 0x9D, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE5, 0xAE, 0x89, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x89, 0x93, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, + 0x95, 0x97, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x9C, 0xAC, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE7, 0x82, 0xB9, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, 0x9B, 0x97, + // Bytes 2880 - 28bf + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, + 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0xA9, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xB3, + 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, 0xAA, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x49, 0xE3, 0x82, + 0xAB, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAA, 0x49, + // Bytes 28c0 - 28ff + 0xE3, 0x82, 0xB1, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xB9, 0x49, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0x8A, 0x49, 0xE3, 0x82, 0xBB, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x49, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0xE3, 0x82, + 0xB7, 0x49, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x8E, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x83, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0x84, 0x49, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xB3, 0x49, 0xE3, 0x83, 0x95, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xBD, 0x49, + 0xE3, 0x83, 0x98, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + // Bytes 2940 - 297f + 0x84, 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9B, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, + 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x8F, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xAF, 0x49, 0xE3, 0x83, 0xA4, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + // Bytes 2980 - 29bf + 0xA6, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x4C, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0xE2, 0x88, 0xAB, 0x4C, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA1, + 
0x4C, 0xE3, 0x82, 0xA8, 0xE3, 0x83, 0xBC, 0xE3, + // Bytes 29c0 - 29ff + 0x82, 0xAB, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xB3, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x9E, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAA, 0xE3, 0x83, + 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x83, 0xA5, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAF, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0x8D, 0x4C, 0xE3, 0x82, + 0xB5, 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, 0xE3, + 0x83, 0xAB, 0x4C, 0xE3, 0x82, 0xBF, 0xE3, 0x82, + // Bytes 2a40 - 2a7f + 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x4C, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x84, 0x4C, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0x4C, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA3, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xBF, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + // Bytes 2a80 - 2abf + 0x82, 0x9A, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0x92, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, + 0x9B, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0x4C, + 0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 0x83, 0xA1, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, + 0xAB, 0x4C, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, + 0xBC, 0x8F, 0xE4, 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, + 0x4E, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA9, + 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xAE, 0x29, 0x4F, + // Bytes 2b00 - 2b3f + 0xD8, 0xAC, 0xD9, 0x84, 0x20, 0xD8, 0xAC, 0xD9, + 0x84, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x87, 0x4F, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0x4F, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0x4F, + 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4F, + // Bytes 2b40 - 2b7f + 0xE3, 0x82, 0xB5, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x4F, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xAB, 0x4F, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0xAF, 0xE3, 0x82, + 0xBF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x4F, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xA7, 0xE3, 0x83, 0xB3, 0x4F, + 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x4F, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x51, + 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xA5, 0xE1, 0x86, 0xAB, + // Bytes 2bc0 - 2bff + 0x29, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 
+ 0x99, 0xE3, 0x83, 0xBC, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x82, 0xAF, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x52, + 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x82, + // Bytes 2c00 - 2c3f + 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x52, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBB, 0xE3, + 0x82, 0x99, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAD, + 0x52, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xBC, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x52, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0xE3, 0x82, 0xB9, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x83, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0xA7, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0x9F, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0xAC, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, + 0xB1, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, 0x61, + // Bytes 2c80 - 2cbf + 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, 0x20, 0xD8, + 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x20, + 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, 0x87, + 0x20, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, 0x84, 0xD9, + 0x85, 0x06, 0xE0, 0xA7, 0x87, 0xE0, 0xA6, 0xBE, + 0x01, 0x06, 0xE0, 0xA7, 0x87, 0xE0, 0xA7, 0x97, + 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAC, 0xBE, + 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAD, 0x96, + // Bytes 2cc0 - 2cff + 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAD, 0x97, + 0x01, 0x06, 0xE0, 0xAE, 0x92, 0xE0, 0xAF, 0x97, + 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, 0xAE, 0xBE, + 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, 0xAF, 0x97, + 0x01, 0x06, 0xE0, 0xAF, 0x87, 0xE0, 0xAE, 0xBE, + 0x01, 0x06, 0xE0, 0xB2, 0xBF, 0xE0, 0xB3, 0x95, + 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x95, + 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x96, + // Bytes 2d00 - 2d3f + 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, 0xB4, 0xBE, + 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, 0xB5, 0x97, + 0x01, 0x06, 0xE0, 0xB5, 0x87, 0xE0, 0xB4, 0xBE, + 0x01, 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x9F, + 0x01, 0x06, 0xE1, 0x80, 0xA5, 0xE1, 0x80, 0xAE, + 0x01, 0x06, 0xE1, 0xAC, 0x85, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x87, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x89, 0xE1, 0xAC, 0xB5, + // Bytes 2d40 - 2d7f + 0x01, 0x06, 0xE1, 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x91, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAD, 0x82, 0xE1, 0xAC, 0xB5, + // Bytes 2d80 - 2dbf + 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB1, 0xF0, 0x91, + 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB2, + 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, + 0x8D, 0x87, 0xF0, 0x91, 0x8C, 0xBE, 0x01, 0x08, + 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, 0x8D, 0x97, + 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, + 0x92, 0xB0, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, + 0xF0, 0x91, 0x92, 0xBA, 0x01, 0x08, 0xF0, 0x91, + // Bytes 2dc0 - 2dff + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBD, 0x01, 0x08, + 0xF0, 0x91, 0x96, 0xB8, 0xF0, 0x91, 0x96, 0xAF, + 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB9, 0xF0, 0x91, + 0x96, 0xAF, 0x01, 0x09, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x82, 0xE0, 0xB3, 0x95, 0x02, 0x09, 0xE0, + 0xB7, 0x99, 0xE0, 
0xB7, 0x8F, 0xE0, 0xB7, 0x8A, + 0x12, 0x44, 0x44, 0x5A, 0xCC, 0x8C, 0xC9, 0x44, + 0x44, 0x7A, 0xCC, 0x8C, 0xC9, 0x44, 0x64, 0x7A, + // Bytes 2e00 - 2e3f + 0xCC, 0x8C, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x93, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x94, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x46, 0xE1, 0x84, 0x80, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x82, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x83, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x85, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x86, 0xE1, + // Bytes 2e40 - 2e7f + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x87, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x89, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xAE, 0x01, 0x46, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8F, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x90, 0xE1, + // Bytes 2e80 - 2ebf + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x91, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x92, 0xE1, + 0x85, 0xA1, 0x01, 0x49, 0xE3, 0x83, 0xA1, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0xE1, 0x84, 0x8B, + 0xE1, 0x85, 0xB4, 0x01, 0x4C, 0xE3, 0x82, 0xAD, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0x0D, 0x4C, 0xE3, 0x82, 0xB3, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0xBC, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x4C, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE1, + 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0xE1, 0x86, 0xB7, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA9, 0x01, 0x4F, + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0x8B, 0xE3, 0x83, + 0xB3, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x4F, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xAA, 0xE3, + // Bytes 2f00 - 2f3f + 0x83, 0xB3, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, + 0x0D, 0x4F, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB7, 0xE3, 0x82, + 0x99, 0x0D, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x82, 0xA8, 0xE3, + 0x82, 0xB9, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 0x52, + // Bytes 2f40 - 2f7f + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA1, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x86, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x82, 0x01, 0x86, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0x01, 0x03, 0x3C, 0xCC, 0xB8, 0x05, + 0x03, 0x3D, 0xCC, 0xB8, 0x05, 0x03, 0x3E, 0xCC, + 0xB8, 0x05, 0x03, 0x41, 0xCC, 0x80, 0xC9, 0x03, + 0x41, 0xCC, 0x81, 0xC9, 0x03, 0x41, 0xCC, 0x83, + // Bytes 2f80 - 2fbf + 0xC9, 0x03, 0x41, 0xCC, 0x84, 0xC9, 0x03, 0x41, + 0xCC, 0x89, 0xC9, 0x03, 0x41, 0xCC, 0x8C, 0xC9, + 0x03, 0x41, 0xCC, 0x8F, 0xC9, 0x03, 0x41, 0xCC, + 0x91, 0xC9, 0x03, 0x41, 0xCC, 0xA5, 0xB5, 0x03, + 0x41, 0xCC, 0xA8, 0xA5, 0x03, 0x42, 0xCC, 0x87, + 0xC9, 0x03, 0x42, 0xCC, 0xA3, 0xB5, 0x03, 0x42, + 0xCC, 0xB1, 0xB5, 0x03, 0x43, 0xCC, 0x81, 0xC9, + 0x03, 0x43, 0xCC, 0x82, 0xC9, 0x03, 0x43, 0xCC, + // Bytes 2fc0 - 2fff + 0x87, 0xC9, 0x03, 0x43, 0xCC, 0x8C, 0xC9, 0x03, + 0x44, 0xCC, 0x87, 0xC9, 0x03, 0x44, 0xCC, 0x8C, + 0xC9, 0x03, 0x44, 0xCC, 0xA3, 0xB5, 0x03, 0x44, + 0xCC, 0xA7, 0xA5, 0x03, 0x44, 0xCC, 0xAD, 0xB5, + 0x03, 0x44, 0xCC, 0xB1, 0xB5, 0x03, 0x45, 0xCC, + 0x80, 0xC9, 0x03, 0x45, 0xCC, 0x81, 0xC9, 0x03, + 0x45, 0xCC, 0x83, 0xC9, 0x03, 0x45, 0xCC, 0x86, + 0xC9, 0x03, 0x45, 0xCC, 0x87, 0xC9, 0x03, 0x45, + // Bytes 3000 - 303f + 0xCC, 0x88, 
0xC9, 0x03, 0x45, 0xCC, 0x89, 0xC9, + 0x03, 0x45, 0xCC, 0x8C, 0xC9, 0x03, 0x45, 0xCC, + 0x8F, 0xC9, 0x03, 0x45, 0xCC, 0x91, 0xC9, 0x03, + 0x45, 0xCC, 0xA8, 0xA5, 0x03, 0x45, 0xCC, 0xAD, + 0xB5, 0x03, 0x45, 0xCC, 0xB0, 0xB5, 0x03, 0x46, + 0xCC, 0x87, 0xC9, 0x03, 0x47, 0xCC, 0x81, 0xC9, + 0x03, 0x47, 0xCC, 0x82, 0xC9, 0x03, 0x47, 0xCC, + 0x84, 0xC9, 0x03, 0x47, 0xCC, 0x86, 0xC9, 0x03, + // Bytes 3040 - 307f + 0x47, 0xCC, 0x87, 0xC9, 0x03, 0x47, 0xCC, 0x8C, + 0xC9, 0x03, 0x47, 0xCC, 0xA7, 0xA5, 0x03, 0x48, + 0xCC, 0x82, 0xC9, 0x03, 0x48, 0xCC, 0x87, 0xC9, + 0x03, 0x48, 0xCC, 0x88, 0xC9, 0x03, 0x48, 0xCC, + 0x8C, 0xC9, 0x03, 0x48, 0xCC, 0xA3, 0xB5, 0x03, + 0x48, 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0xAE, + 0xB5, 0x03, 0x49, 0xCC, 0x80, 0xC9, 0x03, 0x49, + 0xCC, 0x81, 0xC9, 0x03, 0x49, 0xCC, 0x82, 0xC9, + // Bytes 3080 - 30bf + 0x03, 0x49, 0xCC, 0x83, 0xC9, 0x03, 0x49, 0xCC, + 0x84, 0xC9, 0x03, 0x49, 0xCC, 0x86, 0xC9, 0x03, + 0x49, 0xCC, 0x87, 0xC9, 0x03, 0x49, 0xCC, 0x89, + 0xC9, 0x03, 0x49, 0xCC, 0x8C, 0xC9, 0x03, 0x49, + 0xCC, 0x8F, 0xC9, 0x03, 0x49, 0xCC, 0x91, 0xC9, + 0x03, 0x49, 0xCC, 0xA3, 0xB5, 0x03, 0x49, 0xCC, + 0xA8, 0xA5, 0x03, 0x49, 0xCC, 0xB0, 0xB5, 0x03, + 0x4A, 0xCC, 0x82, 0xC9, 0x03, 0x4B, 0xCC, 0x81, + // Bytes 30c0 - 30ff + 0xC9, 0x03, 0x4B, 0xCC, 0x8C, 0xC9, 0x03, 0x4B, + 0xCC, 0xA3, 0xB5, 0x03, 0x4B, 0xCC, 0xA7, 0xA5, + 0x03, 0x4B, 0xCC, 0xB1, 0xB5, 0x03, 0x4C, 0xCC, + 0x81, 0xC9, 0x03, 0x4C, 0xCC, 0x8C, 0xC9, 0x03, + 0x4C, 0xCC, 0xA7, 0xA5, 0x03, 0x4C, 0xCC, 0xAD, + 0xB5, 0x03, 0x4C, 0xCC, 0xB1, 0xB5, 0x03, 0x4D, + 0xCC, 0x81, 0xC9, 0x03, 0x4D, 0xCC, 0x87, 0xC9, + 0x03, 0x4D, 0xCC, 0xA3, 0xB5, 0x03, 0x4E, 0xCC, + // Bytes 3100 - 313f + 0x80, 0xC9, 0x03, 0x4E, 0xCC, 0x81, 0xC9, 0x03, + 0x4E, 0xCC, 0x83, 0xC9, 0x03, 0x4E, 0xCC, 0x87, + 0xC9, 0x03, 0x4E, 0xCC, 0x8C, 0xC9, 0x03, 0x4E, + 0xCC, 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0xA7, 0xA5, + 0x03, 0x4E, 0xCC, 0xAD, 0xB5, 0x03, 0x4E, 0xCC, + 0xB1, 0xB5, 0x03, 0x4F, 0xCC, 0x80, 0xC9, 0x03, + 0x4F, 0xCC, 0x81, 0xC9, 0x03, 0x4F, 0xCC, 0x86, + 0xC9, 0x03, 0x4F, 0xCC, 0x89, 0xC9, 0x03, 0x4F, + // Bytes 3140 - 317f + 0xCC, 0x8B, 0xC9, 0x03, 0x4F, 0xCC, 0x8C, 0xC9, + 0x03, 0x4F, 0xCC, 0x8F, 0xC9, 0x03, 0x4F, 0xCC, + 0x91, 0xC9, 0x03, 0x50, 0xCC, 0x81, 0xC9, 0x03, + 0x50, 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x81, + 0xC9, 0x03, 0x52, 0xCC, 0x87, 0xC9, 0x03, 0x52, + 0xCC, 0x8C, 0xC9, 0x03, 0x52, 0xCC, 0x8F, 0xC9, + 0x03, 0x52, 0xCC, 0x91, 0xC9, 0x03, 0x52, 0xCC, + 0xA7, 0xA5, 0x03, 0x52, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3180 - 31bf + 0x53, 0xCC, 0x82, 0xC9, 0x03, 0x53, 0xCC, 0x87, + 0xC9, 0x03, 0x53, 0xCC, 0xA6, 0xB5, 0x03, 0x53, + 0xCC, 0xA7, 0xA5, 0x03, 0x54, 0xCC, 0x87, 0xC9, + 0x03, 0x54, 0xCC, 0x8C, 0xC9, 0x03, 0x54, 0xCC, + 0xA3, 0xB5, 0x03, 0x54, 0xCC, 0xA6, 0xB5, 0x03, + 0x54, 0xCC, 0xA7, 0xA5, 0x03, 0x54, 0xCC, 0xAD, + 0xB5, 0x03, 0x54, 0xCC, 0xB1, 0xB5, 0x03, 0x55, + 0xCC, 0x80, 0xC9, 0x03, 0x55, 0xCC, 0x81, 0xC9, + // Bytes 31c0 - 31ff + 0x03, 0x55, 0xCC, 0x82, 0xC9, 0x03, 0x55, 0xCC, + 0x86, 0xC9, 0x03, 0x55, 0xCC, 0x89, 0xC9, 0x03, + 0x55, 0xCC, 0x8A, 0xC9, 0x03, 0x55, 0xCC, 0x8B, + 0xC9, 0x03, 0x55, 0xCC, 0x8C, 0xC9, 0x03, 0x55, + 0xCC, 0x8F, 0xC9, 0x03, 0x55, 0xCC, 0x91, 0xC9, + 0x03, 0x55, 0xCC, 0xA3, 0xB5, 0x03, 0x55, 0xCC, + 0xA4, 0xB5, 0x03, 0x55, 0xCC, 0xA8, 0xA5, 0x03, + 0x55, 0xCC, 0xAD, 0xB5, 0x03, 0x55, 0xCC, 0xB0, + // Bytes 3200 - 323f + 0xB5, 0x03, 0x56, 0xCC, 0x83, 0xC9, 0x03, 0x56, + 0xCC, 0xA3, 0xB5, 0x03, 0x57, 0xCC, 0x80, 0xC9, + 0x03, 0x57, 0xCC, 0x81, 0xC9, 0x03, 0x57, 0xCC, + 0x82, 0xC9, 0x03, 0x57, 0xCC, 
0x87, 0xC9, 0x03, + 0x57, 0xCC, 0x88, 0xC9, 0x03, 0x57, 0xCC, 0xA3, + 0xB5, 0x03, 0x58, 0xCC, 0x87, 0xC9, 0x03, 0x58, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x80, 0xC9, + 0x03, 0x59, 0xCC, 0x81, 0xC9, 0x03, 0x59, 0xCC, + // Bytes 3240 - 327f + 0x82, 0xC9, 0x03, 0x59, 0xCC, 0x83, 0xC9, 0x03, + 0x59, 0xCC, 0x84, 0xC9, 0x03, 0x59, 0xCC, 0x87, + 0xC9, 0x03, 0x59, 0xCC, 0x88, 0xC9, 0x03, 0x59, + 0xCC, 0x89, 0xC9, 0x03, 0x59, 0xCC, 0xA3, 0xB5, + 0x03, 0x5A, 0xCC, 0x81, 0xC9, 0x03, 0x5A, 0xCC, + 0x82, 0xC9, 0x03, 0x5A, 0xCC, 0x87, 0xC9, 0x03, + 0x5A, 0xCC, 0x8C, 0xC9, 0x03, 0x5A, 0xCC, 0xA3, + 0xB5, 0x03, 0x5A, 0xCC, 0xB1, 0xB5, 0x03, 0x61, + // Bytes 3280 - 32bf + 0xCC, 0x80, 0xC9, 0x03, 0x61, 0xCC, 0x81, 0xC9, + 0x03, 0x61, 0xCC, 0x83, 0xC9, 0x03, 0x61, 0xCC, + 0x84, 0xC9, 0x03, 0x61, 0xCC, 0x89, 0xC9, 0x03, + 0x61, 0xCC, 0x8C, 0xC9, 0x03, 0x61, 0xCC, 0x8F, + 0xC9, 0x03, 0x61, 0xCC, 0x91, 0xC9, 0x03, 0x61, + 0xCC, 0xA5, 0xB5, 0x03, 0x61, 0xCC, 0xA8, 0xA5, + 0x03, 0x62, 0xCC, 0x87, 0xC9, 0x03, 0x62, 0xCC, + 0xA3, 0xB5, 0x03, 0x62, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 32c0 - 32ff + 0x63, 0xCC, 0x81, 0xC9, 0x03, 0x63, 0xCC, 0x82, + 0xC9, 0x03, 0x63, 0xCC, 0x87, 0xC9, 0x03, 0x63, + 0xCC, 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0x87, 0xC9, + 0x03, 0x64, 0xCC, 0x8C, 0xC9, 0x03, 0x64, 0xCC, + 0xA3, 0xB5, 0x03, 0x64, 0xCC, 0xA7, 0xA5, 0x03, + 0x64, 0xCC, 0xAD, 0xB5, 0x03, 0x64, 0xCC, 0xB1, + 0xB5, 0x03, 0x65, 0xCC, 0x80, 0xC9, 0x03, 0x65, + 0xCC, 0x81, 0xC9, 0x03, 0x65, 0xCC, 0x83, 0xC9, + // Bytes 3300 - 333f + 0x03, 0x65, 0xCC, 0x86, 0xC9, 0x03, 0x65, 0xCC, + 0x87, 0xC9, 0x03, 0x65, 0xCC, 0x88, 0xC9, 0x03, + 0x65, 0xCC, 0x89, 0xC9, 0x03, 0x65, 0xCC, 0x8C, + 0xC9, 0x03, 0x65, 0xCC, 0x8F, 0xC9, 0x03, 0x65, + 0xCC, 0x91, 0xC9, 0x03, 0x65, 0xCC, 0xA8, 0xA5, + 0x03, 0x65, 0xCC, 0xAD, 0xB5, 0x03, 0x65, 0xCC, + 0xB0, 0xB5, 0x03, 0x66, 0xCC, 0x87, 0xC9, 0x03, + 0x67, 0xCC, 0x81, 0xC9, 0x03, 0x67, 0xCC, 0x82, + // Bytes 3340 - 337f + 0xC9, 0x03, 0x67, 0xCC, 0x84, 0xC9, 0x03, 0x67, + 0xCC, 0x86, 0xC9, 0x03, 0x67, 0xCC, 0x87, 0xC9, + 0x03, 0x67, 0xCC, 0x8C, 0xC9, 0x03, 0x67, 0xCC, + 0xA7, 0xA5, 0x03, 0x68, 0xCC, 0x82, 0xC9, 0x03, + 0x68, 0xCC, 0x87, 0xC9, 0x03, 0x68, 0xCC, 0x88, + 0xC9, 0x03, 0x68, 0xCC, 0x8C, 0xC9, 0x03, 0x68, + 0xCC, 0xA3, 0xB5, 0x03, 0x68, 0xCC, 0xA7, 0xA5, + 0x03, 0x68, 0xCC, 0xAE, 0xB5, 0x03, 0x68, 0xCC, + // Bytes 3380 - 33bf + 0xB1, 0xB5, 0x03, 0x69, 0xCC, 0x80, 0xC9, 0x03, + 0x69, 0xCC, 0x81, 0xC9, 0x03, 0x69, 0xCC, 0x82, + 0xC9, 0x03, 0x69, 0xCC, 0x83, 0xC9, 0x03, 0x69, + 0xCC, 0x84, 0xC9, 0x03, 0x69, 0xCC, 0x86, 0xC9, + 0x03, 0x69, 0xCC, 0x89, 0xC9, 0x03, 0x69, 0xCC, + 0x8C, 0xC9, 0x03, 0x69, 0xCC, 0x8F, 0xC9, 0x03, + 0x69, 0xCC, 0x91, 0xC9, 0x03, 0x69, 0xCC, 0xA3, + 0xB5, 0x03, 0x69, 0xCC, 0xA8, 0xA5, 0x03, 0x69, + // Bytes 33c0 - 33ff + 0xCC, 0xB0, 0xB5, 0x03, 0x6A, 0xCC, 0x82, 0xC9, + 0x03, 0x6A, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, + 0x81, 0xC9, 0x03, 0x6B, 0xCC, 0x8C, 0xC9, 0x03, + 0x6B, 0xCC, 0xA3, 0xB5, 0x03, 0x6B, 0xCC, 0xA7, + 0xA5, 0x03, 0x6B, 0xCC, 0xB1, 0xB5, 0x03, 0x6C, + 0xCC, 0x81, 0xC9, 0x03, 0x6C, 0xCC, 0x8C, 0xC9, + 0x03, 0x6C, 0xCC, 0xA7, 0xA5, 0x03, 0x6C, 0xCC, + 0xAD, 0xB5, 0x03, 0x6C, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3400 - 343f + 0x6D, 0xCC, 0x81, 0xC9, 0x03, 0x6D, 0xCC, 0x87, + 0xC9, 0x03, 0x6D, 0xCC, 0xA3, 0xB5, 0x03, 0x6E, + 0xCC, 0x80, 0xC9, 0x03, 0x6E, 0xCC, 0x81, 0xC9, + 0x03, 0x6E, 0xCC, 0x83, 0xC9, 0x03, 0x6E, 0xCC, + 0x87, 0xC9, 0x03, 0x6E, 0xCC, 0x8C, 0xC9, 0x03, + 0x6E, 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0xA7, + 0xA5, 0x03, 0x6E, 0xCC, 0xAD, 0xB5, 0x03, 0x6E, + 
0xCC, 0xB1, 0xB5, 0x03, 0x6F, 0xCC, 0x80, 0xC9, + // Bytes 3440 - 347f + 0x03, 0x6F, 0xCC, 0x81, 0xC9, 0x03, 0x6F, 0xCC, + 0x86, 0xC9, 0x03, 0x6F, 0xCC, 0x89, 0xC9, 0x03, + 0x6F, 0xCC, 0x8B, 0xC9, 0x03, 0x6F, 0xCC, 0x8C, + 0xC9, 0x03, 0x6F, 0xCC, 0x8F, 0xC9, 0x03, 0x6F, + 0xCC, 0x91, 0xC9, 0x03, 0x70, 0xCC, 0x81, 0xC9, + 0x03, 0x70, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, + 0x81, 0xC9, 0x03, 0x72, 0xCC, 0x87, 0xC9, 0x03, + 0x72, 0xCC, 0x8C, 0xC9, 0x03, 0x72, 0xCC, 0x8F, + // Bytes 3480 - 34bf + 0xC9, 0x03, 0x72, 0xCC, 0x91, 0xC9, 0x03, 0x72, + 0xCC, 0xA7, 0xA5, 0x03, 0x72, 0xCC, 0xB1, 0xB5, + 0x03, 0x73, 0xCC, 0x82, 0xC9, 0x03, 0x73, 0xCC, + 0x87, 0xC9, 0x03, 0x73, 0xCC, 0xA6, 0xB5, 0x03, + 0x73, 0xCC, 0xA7, 0xA5, 0x03, 0x74, 0xCC, 0x87, + 0xC9, 0x03, 0x74, 0xCC, 0x88, 0xC9, 0x03, 0x74, + 0xCC, 0x8C, 0xC9, 0x03, 0x74, 0xCC, 0xA3, 0xB5, + 0x03, 0x74, 0xCC, 0xA6, 0xB5, 0x03, 0x74, 0xCC, + // Bytes 34c0 - 34ff + 0xA7, 0xA5, 0x03, 0x74, 0xCC, 0xAD, 0xB5, 0x03, + 0x74, 0xCC, 0xB1, 0xB5, 0x03, 0x75, 0xCC, 0x80, + 0xC9, 0x03, 0x75, 0xCC, 0x81, 0xC9, 0x03, 0x75, + 0xCC, 0x82, 0xC9, 0x03, 0x75, 0xCC, 0x86, 0xC9, + 0x03, 0x75, 0xCC, 0x89, 0xC9, 0x03, 0x75, 0xCC, + 0x8A, 0xC9, 0x03, 0x75, 0xCC, 0x8B, 0xC9, 0x03, + 0x75, 0xCC, 0x8C, 0xC9, 0x03, 0x75, 0xCC, 0x8F, + 0xC9, 0x03, 0x75, 0xCC, 0x91, 0xC9, 0x03, 0x75, + // Bytes 3500 - 353f + 0xCC, 0xA3, 0xB5, 0x03, 0x75, 0xCC, 0xA4, 0xB5, + 0x03, 0x75, 0xCC, 0xA8, 0xA5, 0x03, 0x75, 0xCC, + 0xAD, 0xB5, 0x03, 0x75, 0xCC, 0xB0, 0xB5, 0x03, + 0x76, 0xCC, 0x83, 0xC9, 0x03, 0x76, 0xCC, 0xA3, + 0xB5, 0x03, 0x77, 0xCC, 0x80, 0xC9, 0x03, 0x77, + 0xCC, 0x81, 0xC9, 0x03, 0x77, 0xCC, 0x82, 0xC9, + 0x03, 0x77, 0xCC, 0x87, 0xC9, 0x03, 0x77, 0xCC, + 0x88, 0xC9, 0x03, 0x77, 0xCC, 0x8A, 0xC9, 0x03, + // Bytes 3540 - 357f + 0x77, 0xCC, 0xA3, 0xB5, 0x03, 0x78, 0xCC, 0x87, + 0xC9, 0x03, 0x78, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x80, 0xC9, 0x03, 0x79, 0xCC, 0x81, 0xC9, + 0x03, 0x79, 0xCC, 0x82, 0xC9, 0x03, 0x79, 0xCC, + 0x83, 0xC9, 0x03, 0x79, 0xCC, 0x84, 0xC9, 0x03, + 0x79, 0xCC, 0x87, 0xC9, 0x03, 0x79, 0xCC, 0x88, + 0xC9, 0x03, 0x79, 0xCC, 0x89, 0xC9, 0x03, 0x79, + 0xCC, 0x8A, 0xC9, 0x03, 0x79, 0xCC, 0xA3, 0xB5, + // Bytes 3580 - 35bf + 0x03, 0x7A, 0xCC, 0x81, 0xC9, 0x03, 0x7A, 0xCC, + 0x82, 0xC9, 0x03, 0x7A, 0xCC, 0x87, 0xC9, 0x03, + 0x7A, 0xCC, 0x8C, 0xC9, 0x03, 0x7A, 0xCC, 0xA3, + 0xB5, 0x03, 0x7A, 0xCC, 0xB1, 0xB5, 0x04, 0xC2, + 0xA8, 0xCC, 0x80, 0xCA, 0x04, 0xC2, 0xA8, 0xCC, + 0x81, 0xCA, 0x04, 0xC2, 0xA8, 0xCD, 0x82, 0xCA, + 0x04, 0xC3, 0x86, 0xCC, 0x81, 0xC9, 0x04, 0xC3, + 0x86, 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0x98, 0xCC, + // Bytes 35c0 - 35ff + 0x81, 0xC9, 0x04, 0xC3, 0xA6, 0xCC, 0x81, 0xC9, + 0x04, 0xC3, 0xA6, 0xCC, 0x84, 0xC9, 0x04, 0xC3, + 0xB8, 0xCC, 0x81, 0xC9, 0x04, 0xC5, 0xBF, 0xCC, + 0x87, 0xC9, 0x04, 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, + 0x04, 0xCA, 0x92, 0xCC, 0x8C, 0xC9, 0x04, 0xCE, + 0x91, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x91, 0xCC, + 0x81, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x84, 0xC9, + 0x04, 0xCE, 0x91, 0xCC, 0x86, 0xC9, 0x04, 0xCE, + // Bytes 3600 - 363f + 0x91, 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0x95, 0xCC, + 0x80, 0xC9, 0x04, 0xCE, 0x95, 0xCC, 0x81, 0xC9, + 0x04, 0xCE, 0x97, 0xCC, 0x80, 0xC9, 0x04, 0xCE, + 0x97, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, 0xCD, + 0x85, 0xD9, 0x04, 0xCE, 0x99, 0xCC, 0x80, 0xC9, + 0x04, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x04, 0xCE, + 0x99, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x99, 0xCC, + 0x86, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x88, 0xC9, + // Bytes 3640 - 367f + 0x04, 0xCE, 0x9F, 0xCC, 0x80, 0xC9, 0x04, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 
+ 0x94, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x80, 0xC9, + 0x04, 0xCE, 0xA5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, + 0xA5, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, + 0x86, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x04, 0xCE, 0xA9, 0xCC, 0x80, 0xC9, 0x04, 0xCE, + 0xA9, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA9, 0xCD, + // Bytes 3680 - 36bf + 0x85, 0xD9, 0x04, 0xCE, 0xB1, 0xCC, 0x84, 0xC9, + 0x04, 0xCE, 0xB1, 0xCC, 0x86, 0xC9, 0x04, 0xCE, + 0xB1, 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB5, 0xCC, + 0x80, 0xC9, 0x04, 0xCE, 0xB5, 0xCC, 0x81, 0xC9, + 0x04, 0xCE, 0xB7, 0xCD, 0x85, 0xD9, 0x04, 0xCE, + 0xB9, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xB9, 0xCC, + 0x81, 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x84, 0xC9, + 0x04, 0xCE, 0xB9, 0xCC, 0x86, 0xC9, 0x04, 0xCE, + // Bytes 36c0 - 36ff + 0xB9, 0xCD, 0x82, 0xC9, 0x04, 0xCE, 0xBF, 0xCC, + 0x80, 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x81, 0xC9, + 0x04, 0xCF, 0x81, 0xCC, 0x93, 0xC9, 0x04, 0xCF, + 0x81, 0xCC, 0x94, 0xC9, 0x04, 0xCF, 0x85, 0xCC, + 0x80, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x81, 0xC9, + 0x04, 0xCF, 0x85, 0xCC, 0x84, 0xC9, 0x04, 0xCF, + 0x85, 0xCC, 0x86, 0xC9, 0x04, 0xCF, 0x85, 0xCD, + 0x82, 0xC9, 0x04, 0xCF, 0x89, 0xCD, 0x85, 0xD9, + // Bytes 3700 - 373f + 0x04, 0xCF, 0x92, 0xCC, 0x81, 0xC9, 0x04, 0xCF, + 0x92, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x86, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0x90, 0xCC, 0x86, 0xC9, + 0x04, 0xD0, 0x90, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0x93, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x95, 0xCC, + 0x80, 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x86, 0xC9, + 0x04, 0xD0, 0x95, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0x96, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x96, 0xCC, + // Bytes 3740 - 377f + 0x88, 0xC9, 0x04, 0xD0, 0x97, 0xCC, 0x88, 0xC9, + 0x04, 0xD0, 0x98, 0xCC, 0x80, 0xC9, 0x04, 0xD0, + 0x98, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0x98, 0xCC, + 0x86, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x88, 0xC9, + 0x04, 0xD0, 0x9A, 0xCC, 0x81, 0xC9, 0x04, 0xD0, + 0x9E, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, + 0x84, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x86, 0xC9, + 0x04, 0xD0, 0xA3, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + // Bytes 3780 - 37bf + 0xA3, 0xCC, 0x8B, 0xC9, 0x04, 0xD0, 0xA7, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0xAB, 0xCC, 0x88, 0xC9, + 0x04, 0xD0, 0xAD, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0xB0, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0xB3, 0xCC, 0x81, 0xC9, + 0x04, 0xD0, 0xB5, 0xCC, 0x80, 0xC9, 0x04, 0xD0, + 0xB5, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0xB6, 0xCC, 0x86, 0xC9, + // Bytes 37c0 - 37ff + 0x04, 0xD0, 0xB6, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0xB7, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, + 0x80, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x84, 0xC9, + 0x04, 0xD0, 0xB8, 0xCC, 0x86, 0xC9, 0x04, 0xD0, + 0xB8, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xBA, 0xCC, + 0x81, 0xC9, 0x04, 0xD0, 0xBE, 0xCC, 0x88, 0xC9, + 0x04, 0xD1, 0x83, 0xCC, 0x84, 0xC9, 0x04, 0xD1, + 0x83, 0xCC, 0x86, 0xC9, 0x04, 0xD1, 0x83, 0xCC, + // Bytes 3800 - 383f + 0x88, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x8B, 0xC9, + 0x04, 0xD1, 0x87, 0xCC, 0x88, 0xC9, 0x04, 0xD1, + 0x8B, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8D, 0xCC, + 0x88, 0xC9, 0x04, 0xD1, 0x96, 0xCC, 0x88, 0xC9, + 0x04, 0xD1, 0xB4, 0xCC, 0x8F, 0xC9, 0x04, 0xD1, + 0xB5, 0xCC, 0x8F, 0xC9, 0x04, 0xD3, 0x98, 0xCC, + 0x88, 0xC9, 0x04, 0xD3, 0x99, 0xCC, 0x88, 0xC9, + 0x04, 0xD3, 0xA8, 0xCC, 0x88, 0xC9, 0x04, 0xD3, + // Bytes 3840 - 387f + 0xA9, 0xCC, 0x88, 0xC9, 0x04, 0xD8, 0xA7, 0xD9, + 0x93, 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x04, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, 0x04, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x04, 0xD9, 0x8A, 0xD9, + 0x94, 0xC9, 0x04, 0xDB, 0x81, 0xD9, 0x94, 0xC9, + 0x04, 0xDB, 0x92, 
0xD9, 0x94, 0xC9, 0x04, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x05, 0x41, 0xCC, 0x82, + 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, + // Bytes 3880 - 38bf + 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x83, + 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x89, 0xCA, + 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x80, 0xCA, 0x05, + 0x41, 0xCC, 0x86, 0xCC, 0x81, 0xCA, 0x05, 0x41, + 0xCC, 0x86, 0xCC, 0x83, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, 0x87, + 0xCC, 0x84, 0xCA, 0x05, 0x41, 0xCC, 0x88, 0xCC, + 0x84, 0xCA, 0x05, 0x41, 0xCC, 0x8A, 0xCC, 0x81, + // Bytes 38c0 - 38ff + 0xCA, 0x05, 0x41, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x41, 0xCC, 0xA3, 0xCC, 0x86, 0xCA, 0x05, + 0x43, 0xCC, 0xA7, 0xCC, 0x81, 0xCA, 0x05, 0x45, + 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, 0x45, 0xCC, + 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, + 0xCC, 0x83, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x89, 0xCA, 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + // Bytes 3900 - 393f + 0x05, 0x45, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x45, 0xCC, 0xA7, 0xCC, 0x86, 0xCA, 0x05, 0x49, + 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x4C, 0xCC, + 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x82, + 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, + 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x83, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x89, 0xCA, + 0x05, 0x4F, 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3940 - 397f + 0x4F, 0xCC, 0x83, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x83, 0xCC, 0x88, 0xCA, 0x05, 0x4F, 0xCC, + 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, 0x84, + 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x87, 0xCC, + 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x88, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + // Bytes 3980 - 39bf + 0xCC, 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0xA3, 0xB6, 0x05, 0x4F, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x4F, 0xCC, 0xA8, 0xCC, + 0x84, 0xCA, 0x05, 0x52, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x53, 0xCC, 0x81, 0xCC, 0x87, 0xCA, + 0x05, 0x53, 0xCC, 0x8C, 0xCC, 0x87, 0xCA, 0x05, + 0x53, 0xCC, 0xA3, 0xCC, 0x87, 0xCA, 0x05, 0x55, + 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, + // Bytes 39c0 - 39ff + 0x84, 0xCC, 0x88, 0xCA, 0x05, 0x55, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x84, + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x8C, 0xCA, + 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, + 0x55, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x55, + 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + // Bytes 3a00 - 3a3f + 0xCC, 0xA3, 0xB6, 0x05, 0x61, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x61, 0xCC, 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x61, + 0xCC, 0x86, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0x86, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x87, 0xCC, + // Bytes 3a40 - 3a7f + 0x84, 0xCA, 0x05, 0x61, 0xCC, 0x88, 0xCC, 0x84, + 0xCA, 0x05, 0x61, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, + 0x05, 0x61, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x61, 0xCC, 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x63, + 0xCC, 0xA7, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, + 0x82, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, 0x82, + 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, + 0x83, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x89, + // Bytes 3a80 - 3abf + 0xCA, 0x05, 
0x65, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x65, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x65, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x65, + 0xCC, 0xA7, 0xCC, 0x86, 0xCA, 0x05, 0x69, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x6C, 0xCC, 0xA3, + 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + // Bytes 3ac0 - 3aff + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x6F, 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x83, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x83, 0xCC, 0x88, 0xCA, 0x05, 0x6F, 0xCC, 0x84, + 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, + 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x87, 0xCC, 0x84, + 0xCA, 0x05, 0x6F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, + // Bytes 3b00 - 3b3f + 0x6F, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0xA3, 0xB6, 0x05, 0x6F, 0xCC, 0xA3, 0xCC, + 0x82, 0xCA, 0x05, 0x6F, 0xCC, 0xA8, 0xCC, 0x84, + 0xCA, 0x05, 0x72, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x73, 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, + 0x73, 0xCC, 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x73, + // Bytes 3b40 - 3b7f + 0xCC, 0xA3, 0xCC, 0x87, 0xCA, 0x05, 0x75, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x84, + 0xCC, 0x88, 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, + 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x84, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, + 0x75, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x75, + 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x75, 0xCC, + // Bytes 3b80 - 3bbf + 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x89, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0xA3, 0xB6, 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x80, + 0xCA, 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x81, 0xCA, + 0x05, 0xE1, 0xBE, 0xBF, 0xCD, 0x82, 0xCA, 0x05, + 0xE1, 0xBF, 0xBE, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBF, 0xBE, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBF, + 0xBE, 0xCD, 0x82, 0xCA, 0x05, 0xE2, 0x86, 0x90, + // Bytes 3bc0 - 3bff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x86, 0x92, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x86, 0x94, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x87, 0x90, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x87, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x87, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, + 0x88, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x8B, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0xA3, 0xCC, + // Bytes 3c00 - 3c3f + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0xA5, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0xBC, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0x83, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0x85, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0x88, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x8D, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA1, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA4, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA5, 0xCC, 0xB8, + // Bytes 3c40 - 3c7f + 0x05, 0x05, 0xE2, 0x89, 0xB2, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xB3, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xB6, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xB7, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xBA, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBB, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBC, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBD, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x82, 0xCC, 0xB8, 0x05, + // Bytes 3c80 - 3cbf + 0x05, 0xE2, 0x8A, 0x83, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0x86, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x87, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x91, 0xCC, 0xB8, 0x05, 0x05, 
0xE2, 0x8A, 0x92, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xA2, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xA9, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xAB, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3cc0 - 3cff + 0xE2, 0x8A, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xB4, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB5, + 0xCC, 0xB8, 0x05, 0x06, 0xCE, 0x91, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x91, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x95, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x94, + // Bytes 3d00 - 3d3f + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x97, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x99, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x93, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94, + // Bytes 3d40 - 3d7f + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94, + // Bytes 3d80 - 3dbf + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xA9, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x80, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x81, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCD, 0x82, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB5, 0xCC, 0x93, + // Bytes 3dc0 - 3dff + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB7, 0xCC, 0x80, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x81, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCD, 0x82, + // Bytes 3e00 - 3e3f + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB9, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x88, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x88, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94, + // Bytes 3e40 - 3e7f + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88, + // Bytes 3e80 - 3ebf + 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93, + 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x89, 0xCC, 0x80, + 
0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x81, + // Bytes 3ec0 - 3eff + 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCD, 0x82, + 0xCD, 0x85, 0xDA, 0x06, 0xE0, 0xA4, 0xA8, 0xE0, + 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xA4, 0xB0, 0xE0, + 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xA4, 0xB3, 0xE0, + 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xB1, 0x86, 0xE0, + 0xB1, 0x96, 0x85, 0x06, 0xE0, 0xB7, 0x99, 0xE0, + // Bytes 3f00 - 3f3f + 0xB7, 0x8A, 0x11, 0x06, 0xE3, 0x81, 0x86, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8B, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8D, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8F, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x91, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x93, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x95, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x97, 0xE3, + // Bytes 3f40 - 3f7f + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x99, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9B, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9D, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9F, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA1, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA4, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA6, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA8, 0xE3, + // Bytes 3f80 - 3fbf + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xAF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xAF, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB2, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB2, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB5, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB5, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB8, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB8, 0xE3, + // Bytes 3fc0 - 3fff + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xBB, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xBB, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x82, 0x9D, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xA6, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB1, 0xE3, + // Bytes 4000 - 403f + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB3, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB5, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB7, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBB, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBD, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x81, 0xE3, + // Bytes 4040 - 407f + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x84, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x95, 0xE3, + // Bytes 4080 - 40bf + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0xAF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB0, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB1, 0xE3, + // Bytes 40c0 - 40ff + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB2, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xBD, 0xE3, 
+ 0x82, 0x99, 0x0D, 0x08, 0xCE, 0x91, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, + // Bytes 4100 - 413f + 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + // Bytes 4140 - 417f + 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, + // Bytes 4180 - 41bf + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 41c0 - 41ff + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, + // Bytes 4200 - 423f + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDB, 0x08, 0xF0, 0x91, 0x82, 0x99, + // Bytes 4240 - 427f + 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, + 0x82, 0x9B, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x08, + 0xF0, 0x91, 0x82, 0xA5, 0xF0, 0x91, 0x82, 0xBA, + 0x09, 0x42, 0xC2, 0xB4, 0x01, 0x43, 0x20, 0xCC, + 0x81, 0xC9, 0x43, 0x20, 0xCC, 0x83, 0xC9, 0x43, + 0x20, 0xCC, 0x84, 0xC9, 0x43, 0x20, 0xCC, 0x85, + 0xC9, 0x43, 0x20, 0xCC, 0x86, 0xC9, 0x43, 0x20, + 0xCC, 0x87, 0xC9, 0x43, 0x20, 0xCC, 0x88, 0xC9, + // Bytes 4280 - 42bf + 0x43, 0x20, 0xCC, 0x8A, 0xC9, 0x43, 0x20, 0xCC, + 0x8B, 0xC9, 0x43, 0x20, 0xCC, 0x93, 0xC9, 0x43, + 0x20, 0xCC, 0x94, 0xC9, 0x43, 0x20, 0xCC, 0xA7, + 0xA5, 0x43, 0x20, 0xCC, 0xA8, 0xA5, 0x43, 0x20, + 0xCC, 0xB3, 0xB5, 0x43, 0x20, 0xCD, 0x82, 0xC9, + 0x43, 0x20, 0xCD, 0x85, 0xD9, 0x43, 0x20, 0xD9, + 0x8B, 0x59, 0x43, 0x20, 0xD9, 0x8C, 0x5D, 0x43, + 0x20, 0xD9, 0x8D, 0x61, 0x43, 0x20, 0xD9, 0x8E, + // Bytes 42c0 - 42ff + 0x65, 0x43, 0x20, 0xD9, 0x8F, 0x69, 0x43, 0x20, + 0xD9, 0x90, 0x6D, 0x43, 0x20, 0xD9, 0x91, 0x71, + 0x43, 0x20, 0xD9, 0x92, 0x75, 0x43, 0x41, 0xCC, + 0x8A, 0xC9, 0x43, 0x73, 0xCC, 0x87, 0xC9, 0x44, + 0x20, 0xE3, 0x82, 0x99, 0x0D, 0x44, 0x20, 0xE3, + 0x82, 0x9A, 0x0D, 
0x44, 0xC2, 0xA8, 0xCC, 0x81, + 0xCA, 0x44, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x44, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x97, + // Bytes 4300 - 433f + 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x99, 0xCC, 0x81, + 0xC9, 0x44, 0xCE, 0x9F, 0xCC, 0x81, 0xC9, 0x44, + 0xCE, 0xA5, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, + 0xCC, 0x88, 0xC9, 0x44, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x44, 0xCE, 0xB1, 0xCC, 0x81, 0xC9, 0x44, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB7, + 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB9, 0xCC, 0x81, + 0xC9, 0x44, 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x44, + // Bytes 4340 - 437f + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x89, + 0xCC, 0x81, 0xC9, 0x44, 0xD7, 0x90, 0xD6, 0xB7, + 0x31, 0x44, 0xD7, 0x90, 0xD6, 0xB8, 0x35, 0x44, + 0xD7, 0x90, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x91, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBF, + 0x49, 0x44, 0xD7, 0x92, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x93, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x94, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x95, 0xD6, 0xB9, + // Bytes 4380 - 43bf + 0x39, 0x44, 0xD7, 0x95, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x96, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x98, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x99, 0xD6, 0xB4, + 0x25, 0x44, 0xD7, 0x99, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x9A, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9B, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBF, + 0x49, 0x44, 0xD7, 0x9C, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x9E, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA0, + // Bytes 43c0 - 43ff + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA1, 0xD6, 0xBC, + 0x41, 0x44, 0xD7, 0xA3, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0xA4, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, + 0xD6, 0xBF, 0x49, 0x44, 0xD7, 0xA6, 0xD6, 0xBC, + 0x41, 0x44, 0xD7, 0xA7, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0xA8, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA9, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD7, 0x81, + 0x4D, 0x44, 0xD7, 0xA9, 0xD7, 0x82, 0x51, 0x44, + // Bytes 4400 - 443f + 0xD7, 0xAA, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xB2, + 0xD6, 0xB7, 0x31, 0x44, 0xD8, 0xA7, 0xD9, 0x8B, + 0x59, 0x44, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x44, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x44, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x44, 0xD8, 0xB0, 0xD9, 0xB0, + 0x79, 0x44, 0xD8, 0xB1, 0xD9, 0xB0, 0x79, 0x44, + 0xD9, 0x80, 0xD9, 0x8B, 0x59, 0x44, 0xD9, 0x80, + 0xD9, 0x8E, 0x65, 0x44, 0xD9, 0x80, 0xD9, 0x8F, + // Bytes 4440 - 447f + 0x69, 0x44, 0xD9, 0x80, 0xD9, 0x90, 0x6D, 0x44, + 0xD9, 0x80, 0xD9, 0x91, 0x71, 0x44, 0xD9, 0x80, + 0xD9, 0x92, 0x75, 0x44, 0xD9, 0x87, 0xD9, 0xB0, + 0x79, 0x44, 0xD9, 0x88, 0xD9, 0x94, 0xC9, 0x44, + 0xD9, 0x89, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x8A, + 0xD9, 0x94, 0xC9, 0x44, 0xDB, 0x92, 0xD9, 0x94, + 0xC9, 0x44, 0xDB, 0x95, 0xD9, 0x94, 0xC9, 0x45, + 0x20, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x45, 0x20, + // Bytes 4480 - 44bf + 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, + 0x88, 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xD9, 0x8C, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, + // Bytes 44c0 - 44ff + 0x8D, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8E, + 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8F, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x90, 0xD9, 0x91, + 0x72, 0x45, 0x20, 0xD9, 0x91, 0xD9, 0xB0, 0x7A, + 0x45, 0xE2, 0xAB, 0x9D, 0xCC, 0xB8, 0x05, 0x46, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x46, + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x46, + 0xD7, 0xA9, 0xD6, 0xBC, 0xD7, 0x81, 0x4E, 0x46, + // Bytes 4500 - 453f + 0xD7, 0xA9, 
0xD6, 0xBC, 0xD7, 0x82, 0x52, 0x46, + 0xD9, 0x80, 0xD9, 0x8E, 0xD9, 0x91, 0x72, 0x46, + 0xD9, 0x80, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x46, + 0xD9, 0x80, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x46, + 0xE0, 0xA4, 0x95, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0x96, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0x97, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0x9C, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xA1, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0xA2, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0xAB, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0xAF, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA6, 0xA1, 0xE0, 0xA6, 0xBC, 0x09, 0x46, + 0xE0, 0xA6, 0xA2, 0xE0, 0xA6, 0xBC, 0x09, 0x46, + 0xE0, 0xA6, 0xAF, 0xE0, 0xA6, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0x96, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0x97, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0x9C, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0xAB, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0xB2, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0xB8, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xAC, 0xA1, 0xE0, 0xAC, 0xBC, 0x09, 0x46, + 0xE0, 0xAC, 0xA2, 0xE0, 0xAC, 0xBC, 0x09, 0x46, + 0xE0, 0xBE, 0xB2, 0xE0, 0xBE, 0x80, 0x9D, 0x46, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0xB3, 0xE0, 0xBE, 0x80, 0x9D, 0x46, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, 0x48, + 0xF0, 0x9D, 0x85, 0x97, 0xF0, 0x9D, 0x85, 0xA5, + 0xAD, 0x48, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xB9, + 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D, + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x49, + 0xE0, 0xBE, 0xB2, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + // Bytes 4600 - 463f + 0x80, 0x9E, 0x49, 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, + 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB0, 0xAE, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + // Bytes 4640 - 467f + 0xF0, 0x9D, 0x85, 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xB2, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, + 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, + // Bytes 4680 - 46bf + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAF, 0xAE, 0x83, 0x41, 0xCC, 0x82, 0xC9, + 0x83, 0x41, 0xCC, 0x86, 0xC9, 0x83, 0x41, 0xCC, + 0x87, 0xC9, 0x83, 0x41, 0xCC, 0x88, 0xC9, 0x83, + 0x41, 0xCC, 0x8A, 0xC9, 0x83, 0x41, 0xCC, 0xA3, + 0xB5, 0x83, 0x43, 0xCC, 0xA7, 0xA5, 0x83, 0x45, + 0xCC, 0x82, 0xC9, 0x83, 0x45, 0xCC, 0x84, 0xC9, + 0x83, 0x45, 0xCC, 0xA3, 0xB5, 0x83, 0x45, 0xCC, + // Bytes 46c0 - 46ff + 0xA7, 0xA5, 0x83, 0x49, 0xCC, 0x88, 0xC9, 0x83, + 0x4C, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0x82, + 0xC9, 0x83, 0x4F, 0xCC, 0x83, 0xC9, 0x83, 0x4F, + 0xCC, 0x84, 0xC9, 0x83, 0x4F, 0xCC, 0x87, 0xC9, + 0x83, 0x4F, 0xCC, 0x88, 0xC9, 0x83, 0x4F, 0xCC, + 0x9B, 0xAD, 0x83, 0x4F, 0xCC, 0xA3, 0xB5, 0x83, + 0x4F, 0xCC, 0xA8, 0xA5, 0x83, 0x52, 0xCC, 0xA3, + 0xB5, 0x83, 0x53, 0xCC, 0x81, 0xC9, 0x83, 0x53, + // Bytes 4700 - 473f + 0xCC, 0x8C, 0xC9, 0x83, 0x53, 0xCC, 0xA3, 0xB5, + 0x83, 0x55, 0xCC, 0x83, 0xC9, 0x83, 0x55, 0xCC, + 0x84, 0xC9, 0x83, 0x55, 0xCC, 0x88, 0xC9, 0x83, + 0x55, 0xCC, 0x9B, 0xAD, 0x83, 
0x61, 0xCC, 0x82, + 0xC9, 0x83, 0x61, 0xCC, 0x86, 0xC9, 0x83, 0x61, + 0xCC, 0x87, 0xC9, 0x83, 0x61, 0xCC, 0x88, 0xC9, + 0x83, 0x61, 0xCC, 0x8A, 0xC9, 0x83, 0x61, 0xCC, + 0xA3, 0xB5, 0x83, 0x63, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 4740 - 477f + 0x65, 0xCC, 0x82, 0xC9, 0x83, 0x65, 0xCC, 0x84, + 0xC9, 0x83, 0x65, 0xCC, 0xA3, 0xB5, 0x83, 0x65, + 0xCC, 0xA7, 0xA5, 0x83, 0x69, 0xCC, 0x88, 0xC9, + 0x83, 0x6C, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + 0x82, 0xC9, 0x83, 0x6F, 0xCC, 0x83, 0xC9, 0x83, + 0x6F, 0xCC, 0x84, 0xC9, 0x83, 0x6F, 0xCC, 0x87, + 0xC9, 0x83, 0x6F, 0xCC, 0x88, 0xC9, 0x83, 0x6F, + 0xCC, 0x9B, 0xAD, 0x83, 0x6F, 0xCC, 0xA3, 0xB5, + // Bytes 4780 - 47bf + 0x83, 0x6F, 0xCC, 0xA8, 0xA5, 0x83, 0x72, 0xCC, + 0xA3, 0xB5, 0x83, 0x73, 0xCC, 0x81, 0xC9, 0x83, + 0x73, 0xCC, 0x8C, 0xC9, 0x83, 0x73, 0xCC, 0xA3, + 0xB5, 0x83, 0x75, 0xCC, 0x83, 0xC9, 0x83, 0x75, + 0xCC, 0x84, 0xC9, 0x83, 0x75, 0xCC, 0x88, 0xC9, + 0x83, 0x75, 0xCC, 0x9B, 0xAD, 0x84, 0xCE, 0x91, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x91, 0xCC, 0x94, + 0xC9, 0x84, 0xCE, 0x95, 0xCC, 0x93, 0xC9, 0x84, + // Bytes 47c0 - 47ff + 0xCE, 0x95, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x97, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x94, + 0xC9, 0x84, 0xCE, 0x99, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0x99, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x9F, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x94, + 0xC9, 0x84, 0xCE, 0xA5, 0xCC, 0x94, 0xC9, 0x84, + 0xCE, 0xA9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xA9, + 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x80, + // Bytes 4800 - 483f + 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x81, 0xC9, 0x84, + 0xCE, 0xB1, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB1, + 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB1, 0xCD, 0x82, + 0xC9, 0x84, 0xCE, 0xB5, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0xB5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB7, + 0xCC, 0x80, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x81, + 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0xB7, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB7, + // Bytes 4840 - 487f + 0xCD, 0x82, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x88, + 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0xB9, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xBF, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x94, + 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x88, 0xC9, 0x84, + 0xCF, 0x85, 0xCC, 0x93, 0xC9, 0x84, 0xCF, 0x85, + 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x80, + 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x81, 0xC9, 0x84, + // Bytes 4880 - 48bf + 0xCF, 0x89, 0xCC, 0x93, 0xC9, 0x84, 0xCF, 0x89, + 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x89, 0xCD, 0x82, + 0xC9, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 48c0 - 48ff + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, + // Bytes 4900 - 493f + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, + 
0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 4940 - 497f + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 4980 - 49bf + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x42, 0xCC, 0x80, 0xC9, 0x32, 0x42, 0xCC, + 0x81, 0xC9, 0x32, 0x42, 0xCC, 0x93, 0xC9, 0x32, + // Bytes 49c0 - 49ff + 0x43, 0xE1, 0x85, 0xA1, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA2, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA3, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA4, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA5, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA6, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA7, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA8, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA9, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAA, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAB, + // Bytes 4a00 - 4a3f + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAC, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAD, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAE, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAF, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB0, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB1, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB2, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB3, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB4, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB5, 0x01, 0x00, 0x43, 0xE1, + // Bytes 4a40 - 4a7f + 0x86, 0xAA, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAC, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAD, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB0, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB2, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB3, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB4, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB5, 0x01, 0x00, 0x44, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x32, 0x43, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4a80 - 4abf + 0x03, 0x43, 0xE3, 0x82, 0x9A, 0x0D, 0x03, 0x46, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB2, 0x9E, 0x26, + 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, + 0x26, 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, + 0x9E, 0x26, 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. 
Total size: 10610 bytes (10.36 KiB). Checksum: 95e8869a9f81e5e6. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f72, 0xc1: 0x2f77, 0xc2: 0x468b, 0xc3: 0x2f7c, 0xc4: 0x469a, 0xc5: 0x469f, + 0xc6: 0xa000, 0xc7: 0x46a9, 0xc8: 0x2fe5, 0xc9: 0x2fea, 0xca: 0x46ae, 0xcb: 0x2ffe, + 0xcc: 0x3071, 0xcd: 0x3076, 0xce: 0x307b, 0xcf: 0x46c2, 0xd1: 0x3107, + 0xd2: 0x312a, 0xd3: 0x312f, 0xd4: 0x46cc, 0xd5: 0x46d1, 0xd6: 0x46e0, + 0xd8: 0xa000, 0xd9: 0x31b6, 0xda: 0x31bb, 0xdb: 0x31c0, 0xdc: 0x4712, 0xdd: 0x3238, + 0xe0: 0x327e, 0xe1: 0x3283, 0xe2: 0x471c, 0xe3: 0x3288, + 0xe4: 0x472b, 0xe5: 0x4730, 0xe6: 0xa000, 0xe7: 0x473a, 0xe8: 0x32f1, 0xe9: 0x32f6, + 0xea: 0x473f, 0xeb: 0x330a, 0xec: 0x3382, 0xed: 0x3387, 0xee: 0x338c, 0xef: 0x4753, + 0xf1: 0x3418, 0xf2: 0x343b, 0xf3: 0x3440, 0xf4: 0x475d, 0xf5: 0x4762, + 0xf6: 0x4771, 0xf8: 0xa000, 0xf9: 0x34cc, 0xfa: 0x34d1, 0xfb: 0x34d6, + 0xfc: 0x47a3, 0xfd: 0x3553, 0xff: 0x356c, + // Block 0x4, offset 0x100 + 0x100: 0x2f81, 0x101: 0x328d, 0x102: 0x4690, 0x103: 0x4721, 0x104: 0x2f9f, 0x105: 0x32ab, + 0x106: 0x2fb3, 0x107: 0x32bf, 0x108: 0x2fb8, 0x109: 0x32c4, 0x10a: 0x2fbd, 0x10b: 0x32c9, + 0x10c: 0x2fc2, 0x10d: 0x32ce, 0x10e: 0x2fcc, 0x10f: 0x32d8, + 0x112: 0x46b3, 0x113: 0x4744, 0x114: 0x2ff4, 0x115: 0x3300, 0x116: 0x2ff9, 0x117: 0x3305, + 0x118: 0x3017, 0x119: 0x3323, 0x11a: 0x3008, 0x11b: 0x3314, 0x11c: 0x3030, 0x11d: 0x333c, + 0x11e: 0x303a, 0x11f: 0x3346, 0x120: 0x303f, 0x121: 0x334b, 0x122: 0x3049, 0x123: 0x3355, + 0x124: 0x304e, 0x125: 0x335a, 0x128: 0x3080, 0x129: 0x3391, + 0x12a: 0x3085, 0x12b: 0x3396, 0x12c: 0x308a, 0x12d: 0x339b, 0x12e: 0x30ad, 0x12f: 0x33b9, + 0x130: 0x308f, 0x134: 0x30b7, 0x135: 0x33c3, + 0x136: 0x30cb, 0x137: 0x33dc, 0x139: 0x30d5, 0x13a: 0x33e6, 0x13b: 0x30df, + 0x13c: 0x33f0, 0x13d: 0x30da, 0x13e: 0x33eb, + // Block 0x5, offset 0x140 + 0x143: 0x3102, 0x144: 0x3413, 0x145: 0x311b, + 0x146: 0x342c, 0x147: 0x3111, 0x148: 0x3422, + 0x14c: 0x46d6, 0x14d: 0x4767, 0x14e: 0x3134, 0x14f: 0x3445, 0x150: 0x313e, 0x151: 0x344f, + 0x154: 0x315c, 0x155: 0x346d, 0x156: 0x3175, 0x157: 0x3486, + 0x158: 0x3166, 0x159: 0x3477, 0x15a: 0x46f9, 0x15b: 0x478a, 0x15c: 0x317f, 0x15d: 0x3490, + 0x15e: 0x318e, 0x15f: 0x349f, 0x160: 0x46fe, 0x161: 0x478f, 0x162: 
0x31a7, 0x163: 0x34bd, + 0x164: 0x3198, 0x165: 0x34ae, 0x168: 0x4708, 0x169: 0x4799, + 0x16a: 0x470d, 0x16b: 0x479e, 0x16c: 0x31c5, 0x16d: 0x34db, 0x16e: 0x31cf, 0x16f: 0x34e5, + 0x170: 0x31d4, 0x171: 0x34ea, 0x172: 0x31f2, 0x173: 0x3508, 0x174: 0x3215, 0x175: 0x352b, + 0x176: 0x323d, 0x177: 0x3558, 0x178: 0x3251, 0x179: 0x3260, 0x17a: 0x3580, 0x17b: 0x326a, + 0x17c: 0x358a, 0x17d: 0x326f, 0x17e: 0x358f, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f8b, 0x18e: 0x3297, 0x18f: 0x3099, 0x190: 0x33a5, 0x191: 0x3143, + 0x192: 0x3454, 0x193: 0x31d9, 0x194: 0x34ef, 0x195: 0x39d2, 0x196: 0x3b61, 0x197: 0x39cb, + 0x198: 0x3b5a, 0x199: 0x39d9, 0x19a: 0x3b68, 0x19b: 0x39c4, 0x19c: 0x3b53, + 0x19e: 0x38b3, 0x19f: 0x3a42, 0x1a0: 0x38ac, 0x1a1: 0x3a3b, 0x1a2: 0x35b6, 0x1a3: 0x35c8, + 0x1a6: 0x3044, 0x1a7: 0x3350, 0x1a8: 0x30c1, 0x1a9: 0x33d2, + 0x1aa: 0x46ef, 0x1ab: 0x4780, 0x1ac: 0x3993, 0x1ad: 0x3b22, 0x1ae: 0x35da, 0x1af: 0x35e0, + 0x1b0: 0x33c8, 0x1b4: 0x302b, 0x1b5: 0x3337, + 0x1b8: 0x30fd, 0x1b9: 0x340e, 0x1ba: 0x38ba, 0x1bb: 0x3a49, + 0x1bc: 0x35b0, 0x1bd: 0x35c2, 0x1be: 0x35bc, 0x1bf: 0x35ce, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f90, 0x1c1: 0x329c, 0x1c2: 0x2f95, 0x1c3: 0x32a1, 0x1c4: 0x300d, 0x1c5: 0x3319, + 0x1c6: 0x3012, 0x1c7: 0x331e, 0x1c8: 0x309e, 0x1c9: 0x33aa, 0x1ca: 0x30a3, 0x1cb: 0x33af, + 0x1cc: 0x3148, 0x1cd: 0x3459, 0x1ce: 0x314d, 0x1cf: 0x345e, 0x1d0: 0x316b, 0x1d1: 0x347c, + 0x1d2: 0x3170, 0x1d3: 0x3481, 0x1d4: 0x31de, 0x1d5: 0x34f4, 0x1d6: 0x31e3, 0x1d7: 0x34f9, + 0x1d8: 0x3189, 0x1d9: 0x349a, 0x1da: 0x31a2, 0x1db: 0x34b8, + 0x1de: 0x305d, 0x1df: 0x3369, + 0x1e6: 0x4695, 0x1e7: 0x4726, 0x1e8: 0x46bd, 0x1e9: 0x474e, + 0x1ea: 0x3962, 0x1eb: 0x3af1, 0x1ec: 0x393f, 0x1ed: 0x3ace, 0x1ee: 0x46db, 0x1ef: 0x476c, + 0x1f0: 0x395b, 0x1f1: 0x3aea, 0x1f2: 0x3247, 0x1f3: 0x3562, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49b1, 0x241: 0x49b6, 0x242: 0x9932, 0x243: 0x49bb, 0x244: 0x4a74, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 
0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a4, + 0x286: 0x35ec, 0x287: 0x00ce, 0x288: 0x360a, 0x289: 0x3616, 0x28a: 0x3628, + 0x28c: 0x3646, 0x28e: 0x3658, 0x28f: 0x3676, 0x290: 0x3e0b, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x363a, 0x2ab: 0x366a, 0x2ac: 0x4801, 0x2ad: 0x369a, 0x2ae: 0x482b, 0x2af: 0x36ac, + 0x2b0: 0x3e73, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3724, 0x2c1: 0x3730, 0x2c3: 0x371e, + 0x2c6: 0xa000, 0x2c7: 0x370c, + 0x2cc: 0x3760, 0x2cd: 0x3748, 0x2ce: 0x3772, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3754, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d8, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3736, 0x302: 0x37ba, + 0x310: 0x3712, 0x311: 0x3796, + 0x312: 0x3718, 0x313: 0x379c, 0x316: 0x372a, 0x317: 0x37ae, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x382c, 0x31b: 0x3832, 0x31c: 0x373c, 0x31d: 0x37c0, + 0x31e: 0x3742, 0x31f: 0x37c6, 0x322: 0x374e, 0x323: 0x37d2, + 0x324: 0x375a, 0x325: 0x37de, 0x326: 0x3766, 0x327: 0x37ea, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3838, 0x32b: 0x383e, 0x32c: 0x3790, 0x32d: 0x3814, 0x32e: 0x376c, 0x32f: 0x37f0, + 0x330: 0x3778, 0x331: 0x37fc, 0x332: 0x377e, 0x333: 0x3802, 0x334: 0x3784, 0x335: 0x3808, + 0x338: 0x378a, 0x339: 0x380e, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d, + 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d, + 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d, + 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132, + 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132, + 
0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d29, 0x407: 0xa000, 0x408: 0x2d31, 0x409: 0xa000, 0x40a: 0x2d39, 0x40b: 0xa000, + 0x40c: 0x2d41, 0x40d: 0xa000, 0x40e: 0x2d49, 0x411: 0xa000, + 0x412: 0x2d51, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d59, + 0x43c: 0xa000, 0x43d: 0x2d61, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132, + 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132, + 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132, + 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132, + 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132, + 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132, + 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132, + 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132, + 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132, + 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132, + 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d, + // Block 0x12, offset 0x480 + 0x480: 0x2f9a, 0x481: 0x32a6, 0x482: 0x2fa4, 0x483: 0x32b0, 0x484: 0x2fa9, 0x485: 0x32b5, + 0x486: 0x2fae, 0x487: 0x32ba, 0x488: 0x38cf, 0x489: 0x3a5e, 0x48a: 0x2fc7, 0x48b: 0x32d3, + 0x48c: 0x2fd1, 0x48d: 0x32dd, 0x48e: 0x2fe0, 0x48f: 0x32ec, 0x490: 0x2fd6, 0x491: 0x32e2, + 0x492: 0x2fdb, 0x493: 0x32e7, 0x494: 0x38f2, 0x495: 0x3a81, 0x496: 0x38f9, 0x497: 0x3a88, + 0x498: 0x301c, 0x499: 0x3328, 0x49a: 0x3021, 0x49b: 0x332d, 0x49c: 0x3907, 0x49d: 0x3a96, + 0x49e: 0x3026, 0x49f: 0x3332, 0x4a0: 0x3035, 0x4a1: 0x3341, 0x4a2: 0x3053, 0x4a3: 0x335f, + 0x4a4: 0x3062, 0x4a5: 0x336e, 0x4a6: 0x3058, 0x4a7: 0x3364, 0x4a8: 0x3067, 0x4a9: 0x3373, + 0x4aa: 0x306c, 0x4ab: 0x3378, 0x4ac: 0x30b2, 0x4ad: 0x33be, 0x4ae: 0x390e, 0x4af: 0x3a9d, + 0x4b0: 0x30bc, 0x4b1: 0x33cd, 0x4b2: 0x30c6, 0x4b3: 0x33d7, 0x4b4: 0x30d0, 0x4b5: 0x33e1, + 0x4b6: 0x46c7, 0x4b7: 0x4758, 0x4b8: 0x3915, 0x4b9: 0x3aa4, 0x4ba: 0x30e9, 0x4bb: 0x33fa, + 0x4bc: 0x30e4, 0x4bd: 0x33f5, 0x4be: 0x30ee, 0x4bf: 0x33ff, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x30f3, 0x4c1: 0x3404, 0x4c2: 0x30f8, 0x4c3: 0x3409, 0x4c4: 0x310c, 0x4c5: 0x341d, + 0x4c6: 0x3116, 0x4c7: 0x3427, 0x4c8: 0x3125, 0x4c9: 0x3436, 0x4ca: 0x3120, 0x4cb: 0x3431, + 0x4cc: 0x3938, 0x4cd: 0x3ac7, 0x4ce: 0x3946, 0x4cf: 0x3ad5, 0x4d0: 0x394d, 0x4d1: 0x3adc, + 0x4d2: 0x3954, 0x4d3: 0x3ae3, 0x4d4: 0x3152, 0x4d5: 0x3463, 0x4d6: 0x3157, 0x4d7: 0x3468, + 0x4d8: 0x3161, 0x4d9: 0x3472, 0x4da: 0x46f4, 0x4db: 0x4785, 0x4dc: 0x399a, 0x4dd: 0x3b29, + 0x4de: 0x317a, 0x4df: 0x348b, 0x4e0: 0x3184, 0x4e1: 0x3495, 0x4e2: 0x4703, 0x4e3: 0x4794, + 0x4e4: 0x39a1, 0x4e5: 0x3b30, 0x4e6: 0x39a8, 0x4e7: 0x3b37, 0x4e8: 0x39af, 0x4e9: 0x3b3e, + 0x4ea: 0x3193, 0x4eb: 0x34a4, 0x4ec: 0x319d, 0x4ed: 0x34b3, 0x4ee: 0x31b1, 0x4ef: 0x34c7, + 0x4f0: 0x31ac, 0x4f1: 0x34c2, 0x4f2: 0x31ed, 0x4f3: 0x3503, 0x4f4: 0x31fc, 0x4f5: 0x3512, + 0x4f6: 0x31f7, 0x4f7: 0x350d, 0x4f8: 0x39b6, 0x4f9: 0x3b45, 0x4fa: 0x39bd, 0x4fb: 0x3b4c, + 0x4fc: 0x3201, 0x4fd: 0x3517, 0x4fe: 0x3206, 0x4ff: 0x351c, + // Block 0x14, offset 0x500 + 0x500: 0x320b, 0x501: 0x3521, 0x502: 0x3210, 0x503: 0x3526, 0x504: 0x321f, 0x505: 0x3535, + 
0x506: 0x321a, 0x507: 0x3530, 0x508: 0x3224, 0x509: 0x353f, 0x50a: 0x3229, 0x50b: 0x3544, + 0x50c: 0x322e, 0x50d: 0x3549, 0x50e: 0x324c, 0x50f: 0x3567, 0x510: 0x3265, 0x511: 0x3585, + 0x512: 0x3274, 0x513: 0x3594, 0x514: 0x3279, 0x515: 0x3599, 0x516: 0x337d, 0x517: 0x34a9, + 0x518: 0x353a, 0x519: 0x3576, 0x51b: 0x35d4, + 0x520: 0x46a4, 0x521: 0x4735, 0x522: 0x2f86, 0x523: 0x3292, + 0x524: 0x387b, 0x525: 0x3a0a, 0x526: 0x3874, 0x527: 0x3a03, 0x528: 0x3889, 0x529: 0x3a18, + 0x52a: 0x3882, 0x52b: 0x3a11, 0x52c: 0x38c1, 0x52d: 0x3a50, 0x52e: 0x3897, 0x52f: 0x3a26, + 0x530: 0x3890, 0x531: 0x3a1f, 0x532: 0x38a5, 0x533: 0x3a34, 0x534: 0x389e, 0x535: 0x3a2d, + 0x536: 0x38c8, 0x537: 0x3a57, 0x538: 0x46b8, 0x539: 0x4749, 0x53a: 0x3003, 0x53b: 0x330f, + 0x53c: 0x2fef, 0x53d: 0x32fb, 0x53e: 0x38dd, 0x53f: 0x3a6c, + // Block 0x15, offset 0x540 + 0x540: 0x38d6, 0x541: 0x3a65, 0x542: 0x38eb, 0x543: 0x3a7a, 0x544: 0x38e4, 0x545: 0x3a73, + 0x546: 0x3900, 0x547: 0x3a8f, 0x548: 0x3094, 0x549: 0x33a0, 0x54a: 0x30a8, 0x54b: 0x33b4, + 0x54c: 0x46ea, 0x54d: 0x477b, 0x54e: 0x3139, 0x54f: 0x344a, 0x550: 0x3923, 0x551: 0x3ab2, + 0x552: 0x391c, 0x553: 0x3aab, 0x554: 0x3931, 0x555: 0x3ac0, 0x556: 0x392a, 0x557: 0x3ab9, + 0x558: 0x398c, 0x559: 0x3b1b, 0x55a: 0x3970, 0x55b: 0x3aff, 0x55c: 0x3969, 0x55d: 0x3af8, + 0x55e: 0x397e, 0x55f: 0x3b0d, 0x560: 0x3977, 0x561: 0x3b06, 0x562: 0x3985, 0x563: 0x3b14, + 0x564: 0x31e8, 0x565: 0x34fe, 0x566: 0x31ca, 0x567: 0x34e0, 0x568: 0x39e7, 0x569: 0x3b76, + 0x56a: 0x39e0, 0x56b: 0x3b6f, 0x56c: 0x39f5, 0x56d: 0x3b84, 0x56e: 0x39ee, 0x56f: 0x3b7d, + 0x570: 0x39fc, 0x571: 0x3b8b, 0x572: 0x3233, 0x573: 0x354e, 0x574: 0x325b, 0x575: 0x357b, + 0x576: 0x3256, 0x577: 0x3571, 0x578: 0x3242, 0x579: 0x355d, + // Block 0x16, offset 0x580 + 0x580: 0x4807, 0x581: 0x480d, 0x582: 0x4921, 0x583: 0x4939, 0x584: 0x4929, 0x585: 0x4941, + 0x586: 0x4931, 0x587: 0x4949, 0x588: 0x47ad, 0x589: 0x47b3, 0x58a: 0x4891, 0x58b: 0x48a9, + 0x58c: 0x4899, 0x58d: 0x48b1, 0x58e: 0x48a1, 0x58f: 0x48b9, 0x590: 0x4819, 0x591: 0x481f, + 0x592: 0x3dbb, 0x593: 0x3dcb, 0x594: 0x3dc3, 0x595: 0x3dd3, + 0x598: 0x47b9, 0x599: 0x47bf, 0x59a: 0x3ceb, 0x59b: 0x3cfb, 0x59c: 0x3cf3, 0x59d: 0x3d03, + 0x5a0: 0x4831, 0x5a1: 0x4837, 0x5a2: 0x4951, 0x5a3: 0x4969, + 0x5a4: 0x4959, 0x5a5: 0x4971, 0x5a6: 0x4961, 0x5a7: 0x4979, 0x5a8: 0x47c5, 0x5a9: 0x47cb, + 0x5aa: 0x48c1, 0x5ab: 0x48d9, 0x5ac: 0x48c9, 0x5ad: 0x48e1, 0x5ae: 0x48d1, 0x5af: 0x48e9, + 0x5b0: 0x4849, 0x5b1: 0x484f, 0x5b2: 0x3e1b, 0x5b3: 0x3e33, 0x5b4: 0x3e23, 0x5b5: 0x3e3b, + 0x5b6: 0x3e2b, 0x5b7: 0x3e43, 0x5b8: 0x47d1, 0x5b9: 0x47d7, 0x5ba: 0x3d1b, 0x5bb: 0x3d33, + 0x5bc: 0x3d23, 0x5bd: 0x3d3b, 0x5be: 0x3d2b, 0x5bf: 0x3d43, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4855, 0x5c1: 0x485b, 0x5c2: 0x3e4b, 0x5c3: 0x3e5b, 0x5c4: 0x3e53, 0x5c5: 0x3e63, + 0x5c8: 0x47dd, 0x5c9: 0x47e3, 0x5ca: 0x3d4b, 0x5cb: 0x3d5b, + 0x5cc: 0x3d53, 0x5cd: 0x3d63, 0x5d0: 0x4867, 0x5d1: 0x486d, + 0x5d2: 0x3e83, 0x5d3: 0x3e9b, 0x5d4: 0x3e8b, 0x5d5: 0x3ea3, 0x5d6: 0x3e93, 0x5d7: 0x3eab, + 0x5d9: 0x47e9, 0x5db: 0x3d6b, 0x5dd: 0x3d73, + 0x5df: 0x3d7b, 0x5e0: 0x487f, 0x5e1: 0x4885, 0x5e2: 0x4981, 0x5e3: 0x4999, + 0x5e4: 0x4989, 0x5e5: 0x49a1, 0x5e6: 0x4991, 0x5e7: 0x49a9, 0x5e8: 0x47ef, 0x5e9: 0x47f5, + 0x5ea: 0x48f1, 0x5eb: 0x4909, 0x5ec: 0x48f9, 0x5ed: 0x4911, 0x5ee: 0x4901, 0x5ef: 0x4919, + 0x5f0: 0x47fb, 0x5f1: 0x4321, 0x5f2: 0x3694, 0x5f3: 0x4327, 0x5f4: 0x4825, 0x5f5: 0x432d, + 0x5f6: 0x36a6, 0x5f7: 0x4333, 0x5f8: 0x36c4, 0x5f9: 0x4339, 0x5fa: 0x36dc, 0x5fb: 0x433f, + 0x5fc: 0x4873, 0x5fd: 0x4345, + // 
Block 0x18, offset 0x600 + 0x600: 0x3da3, 0x601: 0x3dab, 0x602: 0x4187, 0x603: 0x41a5, 0x604: 0x4191, 0x605: 0x41af, + 0x606: 0x419b, 0x607: 0x41b9, 0x608: 0x3cdb, 0x609: 0x3ce3, 0x60a: 0x40d3, 0x60b: 0x40f1, + 0x60c: 0x40dd, 0x60d: 0x40fb, 0x60e: 0x40e7, 0x60f: 0x4105, 0x610: 0x3deb, 0x611: 0x3df3, + 0x612: 0x41c3, 0x613: 0x41e1, 0x614: 0x41cd, 0x615: 0x41eb, 0x616: 0x41d7, 0x617: 0x41f5, + 0x618: 0x3d0b, 0x619: 0x3d13, 0x61a: 0x410f, 0x61b: 0x412d, 0x61c: 0x4119, 0x61d: 0x4137, + 0x61e: 0x4123, 0x61f: 0x4141, 0x620: 0x3ec3, 0x621: 0x3ecb, 0x622: 0x41ff, 0x623: 0x421d, + 0x624: 0x4209, 0x625: 0x4227, 0x626: 0x4213, 0x627: 0x4231, 0x628: 0x3d83, 0x629: 0x3d8b, + 0x62a: 0x414b, 0x62b: 0x4169, 0x62c: 0x4155, 0x62d: 0x4173, 0x62e: 0x415f, 0x62f: 0x417d, + 0x630: 0x3688, 0x631: 0x3682, 0x632: 0x3d93, 0x633: 0x368e, 0x634: 0x3d9b, + 0x636: 0x4813, 0x637: 0x3db3, 0x638: 0x35f8, 0x639: 0x35f2, 0x63a: 0x35e6, 0x63b: 0x42f1, + 0x63c: 0x35fe, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35aa, 0x642: 0x3ddb, 0x643: 0x36a0, 0x644: 0x3de3, + 0x646: 0x483d, 0x647: 0x3dfb, 0x648: 0x3604, 0x649: 0x42f7, 0x64a: 0x3610, 0x64b: 0x42fd, + 0x64c: 0x361c, 0x64d: 0x3b92, 0x64e: 0x3b99, 0x64f: 0x3ba0, 0x650: 0x36b8, 0x651: 0x36b2, + 0x652: 0x3e03, 0x653: 0x44e7, 0x656: 0x36be, 0x657: 0x3e13, + 0x658: 0x3634, 0x659: 0x362e, 0x65a: 0x3622, 0x65b: 0x4303, 0x65d: 0x3ba7, + 0x65e: 0x3bae, 0x65f: 0x3bb5, 0x660: 0x36ee, 0x661: 0x36e8, 0x662: 0x3e6b, 0x663: 0x44ef, + 0x664: 0x36d0, 0x665: 0x36d6, 0x666: 0x36f4, 0x667: 0x3e7b, 0x668: 0x3664, 0x669: 0x365e, + 0x66a: 0x3652, 0x66b: 0x430f, 0x66c: 0x364c, 0x66d: 0x359e, 0x66e: 0x42eb, 0x66f: 0x0081, + 0x672: 0x3eb3, 0x673: 0x36fa, 0x674: 0x3ebb, + 0x676: 0x488b, 0x677: 0x3ed3, 0x678: 0x3640, 0x679: 0x4309, 0x67a: 0x3670, 0x67b: 0x431b, + 0x67c: 0x367c, 0x67d: 0x4259, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c09, 0x683: 0xa000, 0x684: 0x3c10, 0x685: 0xa000, + 0x687: 0x3c17, 0x688: 0xa000, 0x689: 0x3c1e, + 0x68d: 0xa000, + 0x6a0: 0x2f68, 0x6a1: 0xa000, 0x6a2: 0x3c2c, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c25, 0x6ae: 0x2f63, 0x6af: 0x2f6d, + 0x6b0: 0x3c33, 0x6b1: 0x3c3a, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c41, 0x6b5: 0x3c48, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4f, 0x6b9: 0x3c56, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c5d, 0x6c1: 0x3c64, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c79, 0x6c5: 0x3c80, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c87, 0x6c9: 0x3c8e, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3ca3, 0x6ed: 0x3caa, 0x6ee: 0x3cb1, 0x6ef: 0x3cb8, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f0b, 0x70d: 0xa000, 0x70e: 0x3f13, 0x70f: 0xa000, 0x710: 0x3f1b, 0x711: 0xa000, + 0x712: 0x3f23, 0x713: 0xa000, 0x714: 0x3f2b, 0x715: 0xa000, 0x716: 0x3f33, 0x717: 0xa000, + 0x718: 0x3f3b, 0x719: 0xa000, 0x71a: 0x3f43, 0x71b: 0xa000, 0x71c: 0x3f4b, 0x71d: 0xa000, + 0x71e: 0x3f53, 0x71f: 0xa000, 0x720: 0x3f5b, 0x721: 0xa000, 0x722: 0x3f63, + 0x724: 0xa000, 0x725: 0x3f6b, 0x726: 0xa000, 0x727: 0x3f73, 0x728: 0xa000, 0x729: 0x3f7b, + 0x72f: 0xa000, + 0x730: 0x3f83, 0x731: 0x3f8b, 0x732: 0xa000, 0x733: 0x3f93, 0x734: 0x3f9b, 0x735: 0xa000, + 0x736: 0x3fa3, 0x737: 0x3fab, 0x738: 0xa000, 0x739: 0x3fb3, 0x73a: 0x3fbb, 0x73b: 0xa000, + 0x73c: 0x3fc3, 0x73d: 0x3fcb, + // Block 0x1d, offset 
0x740 + 0x754: 0x3f03, + 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fd3, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3fe3, 0x76d: 0xa000, 0x76e: 0x3feb, 0x76f: 0xa000, + 0x770: 0x3ff3, 0x771: 0xa000, 0x772: 0x3ffb, 0x773: 0xa000, 0x774: 0x4003, 0x775: 0xa000, + 0x776: 0x400b, 0x777: 0xa000, 0x778: 0x4013, 0x779: 0xa000, 0x77a: 0x401b, 0x77b: 0xa000, + 0x77c: 0x4023, 0x77d: 0xa000, 0x77e: 0x402b, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4033, 0x781: 0xa000, 0x782: 0x403b, 0x784: 0xa000, 0x785: 0x4043, + 0x786: 0xa000, 0x787: 0x404b, 0x788: 0xa000, 0x789: 0x4053, + 0x78f: 0xa000, 0x790: 0x405b, 0x791: 0x4063, + 0x792: 0xa000, 0x793: 0x406b, 0x794: 0x4073, 0x795: 0xa000, 0x796: 0x407b, 0x797: 0x4083, + 0x798: 0xa000, 0x799: 0x408b, 0x79a: 0x4093, 0x79b: 0xa000, 0x79c: 0x409b, 0x79d: 0x40a3, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fdb, + 0x7b7: 0x40ab, 0x7b8: 0x40b3, 0x7b9: 0x40bb, 0x7ba: 0x40c3, + 0x7bd: 0xa000, 0x7be: 0x40cb, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb, + 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943, + 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3, + 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43, + 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87, + 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283, + 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f, + 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853, + 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b, + 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b, + 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b, + // Block 0x20, offset 0x800 + 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b, + 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f, + 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7, + 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127, + 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357, + 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873, + 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3, + 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b, + 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57, + 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb, + 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b, + // Block 0x21, offset 0x840 + 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f, + 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3, + 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83, + 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193, + 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b, + 0x85e: 
0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b, + 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f, + 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b, + 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753, + 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777, + 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73, + // Block 0x22, offset 0x880 + 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3, + 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47, + 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af, + 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df, + 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817, + 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3, + 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457, + 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b, + 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27, + 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f, + 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03, + 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27, + 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af, + 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3, + 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb, + 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353, + 0x8e5: 0x1407, 0x8e6: 0x1433, + 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7, + 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897, + 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93, + 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3, + // Block 0x24, offset 0x900 + 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b, + 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f, + 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f, + 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f, + 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff, + 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f, + 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f, + 0x92a: 0x1537, 0x92b: 0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3, + 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7, + 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963, + 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f, + // Block 0x25, offset 0x940 + 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b, + 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb, + 
0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf, + 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f, + 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013, + 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f, + 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b, + 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b, + 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb, + 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343, + 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f, + // Block 0x26, offset 0x980 + 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b, + 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b, + 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2, + 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809, + 0x998: 0x1617, 0x999: 0x1627, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757, + 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773, + 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3, + 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf, + 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff, + 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f, + 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867, + 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af, + 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93, + 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3, + 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917, + // Block 0x28, offset 0xa00 + 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f, + 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983, + 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf, + 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3, + 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef, + 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23, + 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37, + 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63, + 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f, + 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692, + 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7, + // Block 0x29, offset 0xa40 + 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb, + 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f, + 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 
0x16a6, + 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9, + 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83, + 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3, + 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf, + 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7, + 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f, + 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b, + 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87, + 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb, + 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7, + 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663, + 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd, + 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7, + 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b, + 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f, + 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7, + 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700, + 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23, + 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53, + 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714, + 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b, + 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719, + 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728, + 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37, + 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57, + 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737, + 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741, + 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff, + 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637, + 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f, + 0xb12: 0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093, + 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782, + 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3, + 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7, + 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133, + 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 
0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa, + 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4, + 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7, + 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7, + 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b, + 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd, + 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f, + 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f, + 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273, + 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677, + 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7, + 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb, + 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5, + 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa, + 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b, + 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7, + 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665, + 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f, + 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477, + 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693, + 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb, + 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b, + 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567, + 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7, + 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7, + 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef, + 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 
0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + 0x374: 0xa1, + 0x37d: 0xa2, + // Block 0xe, offset 0x380 + 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6, + 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa, + 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf, + 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1, + 0x3a0: 0xb2, 0x3a7: 0xb3, + 0x3a8: 0xb4, 0x3a9: 0xb5, 0x3aa: 0xb6, + 0x3b0: 0x73, 0x3b5: 0xb7, 0x3b6: 0xb8, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb9, 0x3ec: 0xba, + // Block 0x10, offset 0x400 + 0x432: 0xbb, + // Block 0x11, offset 0x440 + 0x445: 0xbc, 0x446: 0xbd, 0x447: 0xbe, + 0x449: 0xbf, + // Block 0x12, offset 0x480 + 0x480: 0xc0, 0x484: 0xba, + 0x48b: 0xc1, + 0x4a3: 0xc2, 0x4a5: 0xc3, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc4, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 151 entries, 302 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xd0, 0xd2, 0xd7, 0xe8, 0xf4, 0xf6, 0xfc, 0xfe, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10b, 0x10e, 0x110, 0x113, 0x116, 0x11a, 0x11f, 0x128, 0x12a, 0x12d, 0x12f, 0x13a, 0x13e, 0x14c, 0x14f, 0x155, 0x15b, 0x166, 0x16a, 0x16c, 0x16e, 0x170, 0x172, 0x174, 0x17a, 0x17e, 0x180, 0x182, 0x18a, 0x18e, 0x191, 0x193, 0x195, 0x197, 0x19a, 0x19c, 0x19e, 0x1a0, 0x1a2, 0x1a8, 0x1ab, 0x1ad, 0x1b4, 0x1ba, 0x1c0, 0x1c8, 0x1ce, 0x1d4, 0x1da, 0x1de, 0x1ec, 0x1f5, 0x1f8, 0x1fb, 0x1fd, 0x200, 0x202, 0x206, 0x20b, 0x20d, 0x20f, 0x214, 0x21a, 0x21c, 0x21e, 0x220, 0x226, 0x229, 0x22b, 0x231, 0x234, 0x23c, 0x243, 0x246, 0x249, 0x24b, 0x24e, 0x256, 0x25a, 0x261, 0x264, 0x26a, 0x26c, 0x26f, 0x271, 0x274, 0x276, 0x278, 0x27a, 0x27c, 0x27f, 0x281, 0x283, 0x285, 0x287, 0x294, 0x29e, 0x2a0, 0x2a2, 0x2a8, 0x2aa, 0x2ac, 0x2af} + +// nfcSparseValues: 689 entries, 2756 bytes +var nfcSparseValues = [689]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 
0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e5, lo: 0xa0, hi: 0xa1}, + {value: 0x4717, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4843, lo: 0x8a, hi: 0x8a}, + {value: 0x4861, lo: 0x8b, hi: 0x8b}, + {value: 0x36ca, lo: 0x8c, hi: 0x8c}, + {value: 0x36e2, lo: 0x8d, hi: 0x8d}, + {value: 0x4879, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3700, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a8, lo: 0x90, hi: 0x90}, + {value: 0x37b4, lo: 0x91, hi: 0x91}, + {value: 0x37a2, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x381a, lo: 0x97, hi: 0x97}, + {value: 0x37e4, lo: 0x9c, hi: 0x9c}, + {value: 0x37cc, lo: 0x9d, hi: 0x9d}, + {value: 0x37f6, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3820, lo: 0xb6, hi: 0xb6}, + {value: 0x3826, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3844, lo: 0xa2, hi: 0xa2}, + {value: 0x384a, lo: 0xa3, hi: 0xa3}, + {value: 0x3856, lo: 0xa4, hi: 0xa4}, + {value: 0x3850, lo: 0xa5, hi: 0xa5}, + {value: 0x385c, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386e, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3862, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3868, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, 
hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3edb, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee3, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eeb, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451f, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ca1, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455f, lo: 0x9c, hi: 0x9d}, + {value: 0x456f, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x4597, lo: 0xb3, hi: 0xb3}, + {value: 0x459f, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4577, lo: 0x99, hi: 0x9b}, + {value: 0x458f, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb9, lo: 0x88, hi: 0x88}, + {value: 0x2cb1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cc1, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a7, lo: 0x9c, hi: 0x9c}, + {value: 0x45af, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc9, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cd1, lo: 0x8a, hi: 0x8a}, + {value: 0x2ce1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd9, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef3, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // 
Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2ce9, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cf1, lo: 0x87, hi: 0x87}, + {value: 0x2cf9, lo: 0x88, hi: 0x88}, + {value: 0x2f53, lo: 0x8a, hi: 0x8a}, + {value: 0x2ddb, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d01, lo: 0x8a, hi: 0x8a}, + {value: 0x2d11, lo: 0x8b, hi: 0x8b}, + {value: 0x2d09, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6be7, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3efb, lo: 0x9a, hi: 0x9a}, + {value: 0x2f5b, lo: 0x9c, hi: 0x9c}, + {value: 0x2de6, lo: 0x9d, hi: 0x9d}, + {value: 0x2d19, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x02}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xd0 + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd2 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd7 + {value: 0x0000, lo: 0x10}, + {value: 0x2647, lo: 0x83, hi: 0x83}, + {value: 0x264e, lo: 0x8d, hi: 0x8d}, + {value: 0x2655, lo: 0x92, hi: 0x92}, + {value: 0x265c, lo: 0x97, hi: 0x97}, + {value: 0x2663, lo: 0x9c, hi: 0x9c}, + {value: 0x2640, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a87, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a90, lo: 0xb5, hi: 0xb5}, + {value: 0x45b7, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bf, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe8 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a99, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x2671, lo: 0x93, hi: 0x93}, + {value: 0x2678, lo: 0x9d, hi: 0x9d}, + {value: 0x267f, lo: 0xa2, hi: 0xa2}, + {value: 0x2686, lo: 0xa7, hi: 0xa7}, + {value: 0x268d, lo: 0xac, hi: 0xac}, + {value: 0x266a, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf4 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf6 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d21, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfc + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfe + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 
0x92}, + // Block 0x2b, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x108 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x110 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x113 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x116 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x11f + {value: 0x0000, lo: 0x08}, + {value: 0x2d69, lo: 0x80, hi: 0x80}, + {value: 0x2d71, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d79, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x128 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x12a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x12d + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x12f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x13a + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3c, offset 0x13e + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3d, offset 0x14c + {value: 0x427e, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3e, offset 0x14f + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, 
hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bbc, lo: 0x9a, hi: 0x9b}, + {value: 0x3bca, lo: 0xae, hi: 0xae}, + // Block 0x3f, offset 0x155 + {value: 0x000e, lo: 0x05}, + {value: 0x3bd1, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd8, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x40, offset 0x15b + {value: 0x6405, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be6, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bed, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf4, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bfb, lo: 0xa4, hi: 0xa5}, + {value: 0x3c02, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x41, offset 0x166 + {value: 0x0007, lo: 0x03}, + {value: 0x3c6b, lo: 0xa0, hi: 0xa1}, + {value: 0x3c95, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbf, lo: 0xaa, hi: 0xad}, + // Block 0x42, offset 0x16a + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x43, offset 0x16c + {value: 0x0000, lo: 0x01}, + {value: 0x44e0, lo: 0x9c, hi: 0x9c}, + // Block 0x44, offset 0x16e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x45, offset 0x170 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x46, offset 0x172 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x47, offset 0x174 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x48, offset 0x17a + {value: 0x0000, lo: 0x03}, + {value: 0x4aa2, lo: 0xb3, hi: 0xb3}, + {value: 0x4aa2, lo: 0xb5, hi: 0xb6}, + {value: 0x4aa2, lo: 0xba, hi: 0xbf}, + // Block 0x49, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x4aa2, lo: 0x8f, hi: 0xa3}, + // Block 0x4a, offset 0x180 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4b, offset 0x182 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4d, offset 0x18e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4e, offset 0x191 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x4f, offset 0x193 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x50, offset 0x195 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x51, offset 0x197 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x52, offset 0x19a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x53, offset 0x19c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x54, offset 0x19e + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x55, 
offset 0x1a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x56, offset 0x1a2 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x57, offset 0x1a8 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x58, offset 0x1ab + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x59, offset 0x1ad + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5a, offset 0x1b4 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5b, offset 0x1ba + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5c, offset 0x1c0 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5d, offset 0x1c8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5e, offset 0x1ce + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x5f, offset 0x1d4 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x60, offset 0x1da + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x61, offset 0x1de + {value: 0x0006, lo: 0x0d}, + {value: 0x4393, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4405, lo: 0x9f, hi: 0x9f}, + {value: 0x43f3, lo: 0xaa, hi: 0xab}, + {value: 0x44f7, lo: 0xac, hi: 0xac}, + {value: 0x44ff, lo: 0xad, hi: 0xad}, + {value: 0x434b, lo: 0xae, hi: 0xb1}, + {value: 0x4369, lo: 0xb2, hi: 0xb4}, + {value: 0x4381, lo: 0xb5, hi: 0xb6}, + {value: 0x438d, lo: 0xb8, hi: 0xb8}, + {value: 0x4399, lo: 0xb9, hi: 0xbb}, + {value: 0x43b1, lo: 0xbc, hi: 0xbc}, + {value: 0x43b7, lo: 0xbe, hi: 0xbe}, + // Block 0x62, offset 0x1ec + {value: 0x0006, lo: 0x08}, + {value: 0x43bd, lo: 0x80, hi: 0x81}, + {value: 0x43c9, lo: 0x83, hi: 0x84}, + {value: 0x43db, lo: 0x86, hi: 0x89}, + {value: 0x43ff, lo: 0x8a, hi: 0x8a}, + {value: 0x437b, lo: 0x8b, hi: 0x8b}, + {value: 0x4363, lo: 0x8c, hi: 0x8c}, + {value: 0x43ab, lo: 0x8d, hi: 0x8d}, + {value: 
0x43d5, lo: 0x8e, hi: 0x8e}, + // Block 0x63, offset 0x1f5 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x64, offset 0x1f8 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x65, offset 0x1fb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x66, offset 0x1fd + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x67, offset 0x200 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x68, offset 0x202 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x69, offset 0x206 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6a, offset 0x20b + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6b, offset 0x20d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6c, offset 0x20f + {value: 0x0000, lo: 0x04}, + {value: 0x4aa2, lo: 0x9e, hi: 0x9f}, + {value: 0x4aa2, lo: 0xa3, hi: 0xa3}, + {value: 0x4aa2, lo: 0xa5, hi: 0xa6}, + {value: 0x4aa2, lo: 0xaa, hi: 0xaf}, + // Block 0x6d, offset 0x214 + {value: 0x0000, lo: 0x05}, + {value: 0x4aa2, lo: 0x82, hi: 0x87}, + {value: 0x4aa2, lo: 0x8a, hi: 0x8f}, + {value: 0x4aa2, lo: 0x92, hi: 0x97}, + {value: 0x4aa2, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6e, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6f, offset 0x21c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x70, offset 0x21e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x71, offset 0x220 + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x226 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x73, offset 0x229 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x74, offset 0x22b + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x231 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x234 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x423b, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4245, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424f, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x23c + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d81, lo: 0xae, hi: 0xae}, + {value: 0x2d8b, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x243 + {value: 0x0000, lo: 0x02}, + 
{value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 0x246 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x249 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x24b + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7c, offset 0x24e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d95, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9f, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7d, offset 0x256 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7e, offset 0x25a + {value: 0x6b57, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db3, lo: 0xbb, hi: 0xbb}, + {value: 0x2da9, lo: 0xbc, hi: 0xbd}, + {value: 0x2dbd, lo: 0xbe, hi: 0xbe}, + // Block 0x7f, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x80, offset 0x264 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc7, lo: 0xba, hi: 0xba}, + {value: 0x2dd1, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x26a + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x82, offset 0x26c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x83, offset 0x26f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x84, offset 0x271 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x85, offset 0x274 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + // Block 0x86, offset 0x276 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x87, offset 0x278 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x88, offset 0x27a + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x89, offset 0x27c + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x8a, offset 0x27f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x8b, offset 0x281 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8c, offset 0x283 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x8d, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8e, offset 0x287 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cf, lo: 0x9e, hi: 0x9e}, + {value: 0x45d9, lo: 0x9f, hi: 0x9f}, + {value: 0x460d, lo: 0xa0, hi: 0xa0}, + {value: 0x461b, lo: 0xa1, hi: 0xa1}, + {value: 0x4629, lo: 0xa2, hi: 0xa2}, + {value: 0x4637, lo: 0xa3, hi: 0xa3}, + {value: 0x4645, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 
0xbf}, + // Block 0x8f, offset 0x294 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e3, lo: 0xbb, hi: 0xbb}, + {value: 0x45ed, lo: 0xbc, hi: 0xbc}, + {value: 0x4653, lo: 0xbd, hi: 0xbd}, + {value: 0x466f, lo: 0xbe, hi: 0xbe}, + {value: 0x4661, lo: 0xbf, hi: 0xbf}, + // Block 0x90, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x467d, lo: 0x80, hi: 0x80}, + // Block 0x91, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x92, offset 0x2a2 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x93, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xac, hi: 0xaf}, + // Block 0x94, offset 0x2aa + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x95, offset 0x2ac + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x96, offset 0x2af + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 18684 bytes (18.25 KiB). Checksum: 113e23c477adfabd. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. 
+var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f72, 0xc1: 0x2f77, 0xc2: 0x468b, 0xc3: 0x2f7c, 0xc4: 0x469a, 0xc5: 0x469f, + 0xc6: 0xa000, 0xc7: 0x46a9, 0xc8: 0x2fe5, 0xc9: 0x2fea, 0xca: 0x46ae, 0xcb: 0x2ffe, + 0xcc: 0x3071, 0xcd: 0x3076, 0xce: 0x307b, 0xcf: 0x46c2, 0xd1: 0x3107, + 0xd2: 0x312a, 0xd3: 0x312f, 0xd4: 0x46cc, 0xd5: 0x46d1, 0xd6: 0x46e0, + 0xd8: 0xa000, 0xd9: 0x31b6, 0xda: 0x31bb, 0xdb: 0x31c0, 0xdc: 0x4712, 0xdd: 0x3238, + 0xe0: 0x327e, 0xe1: 0x3283, 0xe2: 0x471c, 0xe3: 0x3288, + 0xe4: 0x472b, 0xe5: 0x4730, 0xe6: 0xa000, 0xe7: 0x473a, 0xe8: 0x32f1, 0xe9: 0x32f6, + 0xea: 0x473f, 0xeb: 0x330a, 0xec: 0x3382, 0xed: 0x3387, 0xee: 0x338c, 0xef: 0x4753, + 0xf1: 0x3418, 0xf2: 0x343b, 0xf3: 0x3440, 0xf4: 0x475d, 0xf5: 0x4762, + 0xf6: 0x4771, 0xf8: 0xa000, 0xf9: 0x34cc, 0xfa: 0x34d1, 0xfb: 0x34d6, + 0xfc: 0x47a3, 0xfd: 0x3553, 0xff: 0x356c, + // Block 0x4, offset 0x100 + 0x100: 0x2f81, 0x101: 0x328d, 0x102: 0x4690, 0x103: 0x4721, 0x104: 0x2f9f, 0x105: 0x32ab, + 0x106: 0x2fb3, 0x107: 0x32bf, 0x108: 0x2fb8, 0x109: 0x32c4, 0x10a: 0x2fbd, 0x10b: 0x32c9, + 0x10c: 0x2fc2, 0x10d: 0x32ce, 0x10e: 0x2fcc, 0x10f: 0x32d8, + 0x112: 0x46b3, 0x113: 0x4744, 0x114: 0x2ff4, 0x115: 0x3300, 0x116: 0x2ff9, 0x117: 0x3305, + 0x118: 0x3017, 0x119: 0x3323, 0x11a: 0x3008, 0x11b: 0x3314, 0x11c: 0x3030, 0x11d: 0x333c, + 0x11e: 0x303a, 0x11f: 0x3346, 0x120: 0x303f, 0x121: 0x334b, 0x122: 0x3049, 0x123: 0x3355, + 0x124: 0x304e, 0x125: 0x335a, 0x128: 0x3080, 0x129: 0x3391, + 0x12a: 0x3085, 0x12b: 0x3396, 0x12c: 0x308a, 0x12d: 0x339b, 0x12e: 0x30ad, 0x12f: 0x33b9, + 0x130: 0x308f, 0x132: 0x195d, 0x133: 0x19ea, 0x134: 0x30b7, 0x135: 0x33c3, + 0x136: 0x30cb, 0x137: 0x33dc, 0x139: 0x30d5, 0x13a: 0x33e6, 0x13b: 0x30df, + 0x13c: 0x33f0, 0x13d: 0x30da, 0x13e: 0x33eb, 0x13f: 0x1baf, + // Block 0x5, offset 0x140 + 0x140: 0x1c37, 0x143: 0x3102, 0x144: 0x3413, 0x145: 0x311b, + 0x146: 0x342c, 0x147: 0x3111, 0x148: 0x3422, 0x149: 0x1c5f, + 0x14c: 0x46d6, 0x14d: 0x4767, 0x14e: 0x3134, 0x14f: 0x3445, 0x150: 0x313e, 0x151: 0x344f, + 0x154: 0x315c, 0x155: 0x346d, 0x156: 0x3175, 0x157: 0x3486, + 0x158: 0x3166, 0x159: 0x3477, 0x15a: 0x46f9, 0x15b: 0x478a, 0x15c: 0x317f, 0x15d: 0x3490, + 0x15e: 0x318e, 0x15f: 0x349f, 0x160: 0x46fe, 0x161: 0x478f, 0x162: 0x31a7, 0x163: 0x34bd, + 0x164: 0x3198, 0x165: 0x34ae, 0x168: 0x4708, 0x169: 0x4799, + 0x16a: 0x470d, 0x16b: 0x479e, 0x16c: 0x31c5, 0x16d: 0x34db, 0x16e: 0x31cf, 0x16f: 0x34e5, + 0x170: 0x31d4, 0x171: 0x34ea, 0x172: 0x31f2, 0x173: 0x3508, 0x174: 0x3215, 0x175: 0x352b, + 0x176: 0x323d, 0x177: 0x3558, 0x178: 0x3251, 0x179: 0x3260, 0x17a: 0x3580, 0x17b: 0x326a, + 0x17c: 0x358a, 0x17d: 0x326f, 0x17e: 0x358f, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2df1, 0x185: 0x2df7, + 0x186: 0x2dfd, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a0b, 0x18a: 0x198a, 0x18b: 0x198d, + 0x18c: 0x1a41, 0x18d: 0x2f8b, 0x18e: 0x3297, 0x18f: 0x3099, 0x190: 0x33a5, 0x191: 0x3143, + 0x192: 0x3454, 0x193: 0x31d9, 0x194: 0x34ef, 0x195: 0x39d2, 0x196: 0x3b61, 0x197: 0x39cb, + 0x198: 0x3b5a, 0x199: 0x39d9, 0x19a: 0x3b68, 0x19b: 0x39c4, 0x19c: 0x3b53, + 0x19e: 0x38b3, 0x19f: 0x3a42, 0x1a0: 0x38ac, 0x1a1: 0x3a3b, 0x1a2: 0x35b6, 0x1a3: 0x35c8, + 0x1a6: 0x3044, 0x1a7: 0x3350, 0x1a8: 0x30c1, 0x1a9: 0x33d2, + 0x1aa: 0x46ef, 0x1ab: 0x4780, 0x1ac: 0x3993, 0x1ad: 0x3b22, 0x1ae: 0x35da, 0x1af: 0x35e0, + 0x1b0: 0x33c8, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19d2, 0x1b4: 0x302b, 0x1b5: 0x3337, + 0x1b8: 0x30fd, 0x1b9: 0x340e, 0x1ba: 0x38ba, 0x1bb: 0x3a49, + 0x1bc: 0x35b0, 0x1bd: 0x35c2, 0x1be: 0x35bc, 0x1bf: 0x35ce, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f90, 0x1c1: 0x329c, 0x1c2: 0x2f95, 0x1c3: 0x32a1, 0x1c4: 0x300d, 0x1c5: 0x3319, + 0x1c6: 0x3012, 0x1c7: 0x331e, 0x1c8: 0x309e, 0x1c9: 0x33aa, 0x1ca: 0x30a3, 0x1cb: 0x33af, + 0x1cc: 0x3148, 0x1cd: 0x3459, 0x1ce: 0x314d, 0x1cf: 0x345e, 0x1d0: 0x316b, 0x1d1: 0x347c, + 0x1d2: 0x3170, 0x1d3: 0x3481, 0x1d4: 0x31de, 0x1d5: 0x34f4, 0x1d6: 0x31e3, 0x1d7: 0x34f9, + 0x1d8: 0x3189, 0x1d9: 0x349a, 0x1da: 0x31a2, 0x1db: 0x34b8, + 0x1de: 0x305d, 0x1df: 0x3369, + 0x1e6: 0x4695, 0x1e7: 0x4726, 0x1e8: 0x46bd, 0x1e9: 0x474e, + 0x1ea: 0x3962, 0x1eb: 0x3af1, 0x1ec: 0x393f, 0x1ed: 0x3ace, 0x1ee: 0x46db, 0x1ef: 0x476c, + 0x1f0: 0x395b, 0x1f1: 0x3aea, 0x1f2: 0x3247, 0x1f3: 0x3562, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49b1, 0x241: 0x49b6, 0x242: 0x9932, 0x243: 0x49bb, 0x244: 0x4a74, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a8, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425d, 0x285: 0x447e, + 0x286: 0x35ec, 0x287: 0x00ce, 0x288: 0x360a, 0x289: 0x3616, 0x28a: 0x3628, + 0x28c: 0x3646, 0x28e: 0x3658, 
0x28f: 0x3676, 0x290: 0x3e0b, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x363a, 0x2ab: 0x366a, 0x2ac: 0x4801, 0x2ad: 0x369a, 0x2ae: 0x482b, 0x2af: 0x36ac, + 0x2b0: 0x3e73, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4843, 0x2cb: 0x4861, + 0x2cc: 0x36ca, 0x2cd: 0x36e2, 0x2ce: 0x4879, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430f, 0x2d4: 0x4315, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3724, 0x301: 0x3730, 0x303: 0x371e, + 0x306: 0xa000, 0x307: 0x370c, + 0x30c: 0x3760, 0x30d: 0x3748, 0x30e: 0x3772, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3754, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d8, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3736, 0x342: 0x37ba, + 0x350: 0x3712, 0x351: 0x3796, + 0x352: 0x3718, 0x353: 0x379c, 0x356: 0x372a, 0x357: 0x37ae, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x382c, 0x35b: 0x3832, 0x35c: 0x373c, 0x35d: 0x37c0, + 0x35e: 0x3742, 0x35f: 0x37c6, 0x362: 0x374e, 0x363: 0x37d2, + 0x364: 0x375a, 0x365: 0x37de, 0x366: 0x3766, 0x367: 0x37ea, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3838, 0x36b: 0x383e, 0x36c: 0x3790, 0x36d: 0x3814, 0x36e: 0x376c, 0x36f: 0x37f0, + 0x370: 0x3778, 0x371: 0x37fc, 0x372: 0x377e, 0x373: 0x3802, 0x374: 0x3784, 0x375: 0x3808, + 0x378: 0x378a, 0x379: 0x380e, + // Block 0xe, offset 0x380 + 0x387: 0x1d64, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d87, + 0x3f6: 0x2016, 0x3f7: 0x2052, 0x3f8: 0x204d, + // Block 0x10, offset 0x400 + 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d, + 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 
0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d29, 0x447: 0xa000, 0x448: 0x2d31, 0x449: 0xa000, 0x44a: 0x2d39, 0x44b: 0xa000, + 0x44c: 0x2d41, 0x44d: 0xa000, 0x44e: 0x2d49, 0x451: 0xa000, + 0x452: 0x2d51, + 0x474: 0x8102, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d59, + 0x47c: 0xa000, 0x47d: 0x2d61, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be, + 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa, + 0x4aa: 0x01fd, + 0x4b8: 0x020c, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128, + 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0, + // Block 0x14, offset 0x500 + 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132, + 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132, + 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132, + 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132, + 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132, + 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132, + 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132, + 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132, + 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132, + 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132, + 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d, + // Block 0x15, offset 0x540 + 0x540: 0x2f9a, 0x541: 0x32a6, 0x542: 0x2fa4, 0x543: 0x32b0, 0x544: 0x2fa9, 0x545: 0x32b5, + 0x546: 0x2fae, 0x547: 0x32ba, 0x548: 0x38cf, 0x549: 0x3a5e, 0x54a: 0x2fc7, 0x54b: 0x32d3, + 0x54c: 0x2fd1, 0x54d: 0x32dd, 0x54e: 0x2fe0, 0x54f: 0x32ec, 0x550: 0x2fd6, 0x551: 0x32e2, + 0x552: 0x2fdb, 0x553: 0x32e7, 0x554: 0x38f2, 0x555: 0x3a81, 0x556: 0x38f9, 0x557: 0x3a88, + 0x558: 0x301c, 0x559: 0x3328, 0x55a: 0x3021, 0x55b: 0x332d, 0x55c: 0x3907, 0x55d: 0x3a96, + 0x55e: 0x3026, 0x55f: 0x3332, 0x560: 0x3035, 0x561: 0x3341, 0x562: 0x3053, 0x563: 0x335f, + 0x564: 0x3062, 0x565: 0x336e, 0x566: 0x3058, 0x567: 0x3364, 0x568: 0x3067, 0x569: 0x3373, + 0x56a: 0x306c, 0x56b: 0x3378, 0x56c: 0x30b2, 0x56d: 
0x33be, 0x56e: 0x390e, 0x56f: 0x3a9d, + 0x570: 0x30bc, 0x571: 0x33cd, 0x572: 0x30c6, 0x573: 0x33d7, 0x574: 0x30d0, 0x575: 0x33e1, + 0x576: 0x46c7, 0x577: 0x4758, 0x578: 0x3915, 0x579: 0x3aa4, 0x57a: 0x30e9, 0x57b: 0x33fa, + 0x57c: 0x30e4, 0x57d: 0x33f5, 0x57e: 0x30ee, 0x57f: 0x33ff, + // Block 0x16, offset 0x580 + 0x580: 0x30f3, 0x581: 0x3404, 0x582: 0x30f8, 0x583: 0x3409, 0x584: 0x310c, 0x585: 0x341d, + 0x586: 0x3116, 0x587: 0x3427, 0x588: 0x3125, 0x589: 0x3436, 0x58a: 0x3120, 0x58b: 0x3431, + 0x58c: 0x3938, 0x58d: 0x3ac7, 0x58e: 0x3946, 0x58f: 0x3ad5, 0x590: 0x394d, 0x591: 0x3adc, + 0x592: 0x3954, 0x593: 0x3ae3, 0x594: 0x3152, 0x595: 0x3463, 0x596: 0x3157, 0x597: 0x3468, + 0x598: 0x3161, 0x599: 0x3472, 0x59a: 0x46f4, 0x59b: 0x4785, 0x59c: 0x399a, 0x59d: 0x3b29, + 0x59e: 0x317a, 0x59f: 0x348b, 0x5a0: 0x3184, 0x5a1: 0x3495, 0x5a2: 0x4703, 0x5a3: 0x4794, + 0x5a4: 0x39a1, 0x5a5: 0x3b30, 0x5a6: 0x39a8, 0x5a7: 0x3b37, 0x5a8: 0x39af, 0x5a9: 0x3b3e, + 0x5aa: 0x3193, 0x5ab: 0x34a4, 0x5ac: 0x319d, 0x5ad: 0x34b3, 0x5ae: 0x31b1, 0x5af: 0x34c7, + 0x5b0: 0x31ac, 0x5b1: 0x34c2, 0x5b2: 0x31ed, 0x5b3: 0x3503, 0x5b4: 0x31fc, 0x5b5: 0x3512, + 0x5b6: 0x31f7, 0x5b7: 0x350d, 0x5b8: 0x39b6, 0x5b9: 0x3b45, 0x5ba: 0x39bd, 0x5bb: 0x3b4c, + 0x5bc: 0x3201, 0x5bd: 0x3517, 0x5be: 0x3206, 0x5bf: 0x351c, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x320b, 0x5c1: 0x3521, 0x5c2: 0x3210, 0x5c3: 0x3526, 0x5c4: 0x321f, 0x5c5: 0x3535, + 0x5c6: 0x321a, 0x5c7: 0x3530, 0x5c8: 0x3224, 0x5c9: 0x353f, 0x5ca: 0x3229, 0x5cb: 0x3544, + 0x5cc: 0x322e, 0x5cd: 0x3549, 0x5ce: 0x324c, 0x5cf: 0x3567, 0x5d0: 0x3265, 0x5d1: 0x3585, + 0x5d2: 0x3274, 0x5d3: 0x3594, 0x5d4: 0x3279, 0x5d5: 0x3599, 0x5d6: 0x337d, 0x5d7: 0x34a9, + 0x5d8: 0x353a, 0x5d9: 0x3576, 0x5da: 0x1be3, 0x5db: 0x42da, + 0x5e0: 0x46a4, 0x5e1: 0x4735, 0x5e2: 0x2f86, 0x5e3: 0x3292, + 0x5e4: 0x387b, 0x5e5: 0x3a0a, 0x5e6: 0x3874, 0x5e7: 0x3a03, 0x5e8: 0x3889, 0x5e9: 0x3a18, + 0x5ea: 0x3882, 0x5eb: 0x3a11, 0x5ec: 0x38c1, 0x5ed: 0x3a50, 0x5ee: 0x3897, 0x5ef: 0x3a26, + 0x5f0: 0x3890, 0x5f1: 0x3a1f, 0x5f2: 0x38a5, 0x5f3: 0x3a34, 0x5f4: 0x389e, 0x5f5: 0x3a2d, + 0x5f6: 0x38c8, 0x5f7: 0x3a57, 0x5f8: 0x46b8, 0x5f9: 0x4749, 0x5fa: 0x3003, 0x5fb: 0x330f, + 0x5fc: 0x2fef, 0x5fd: 0x32fb, 0x5fe: 0x38dd, 0x5ff: 0x3a6c, + // Block 0x18, offset 0x600 + 0x600: 0x38d6, 0x601: 0x3a65, 0x602: 0x38eb, 0x603: 0x3a7a, 0x604: 0x38e4, 0x605: 0x3a73, + 0x606: 0x3900, 0x607: 0x3a8f, 0x608: 0x3094, 0x609: 0x33a0, 0x60a: 0x30a8, 0x60b: 0x33b4, + 0x60c: 0x46ea, 0x60d: 0x477b, 0x60e: 0x3139, 0x60f: 0x344a, 0x610: 0x3923, 0x611: 0x3ab2, + 0x612: 0x391c, 0x613: 0x3aab, 0x614: 0x3931, 0x615: 0x3ac0, 0x616: 0x392a, 0x617: 0x3ab9, + 0x618: 0x398c, 0x619: 0x3b1b, 0x61a: 0x3970, 0x61b: 0x3aff, 0x61c: 0x3969, 0x61d: 0x3af8, + 0x61e: 0x397e, 0x61f: 0x3b0d, 0x620: 0x3977, 0x621: 0x3b06, 0x622: 0x3985, 0x623: 0x3b14, + 0x624: 0x31e8, 0x625: 0x34fe, 0x626: 0x31ca, 0x627: 0x34e0, 0x628: 0x39e7, 0x629: 0x3b76, + 0x62a: 0x39e0, 0x62b: 0x3b6f, 0x62c: 0x39f5, 0x62d: 0x3b84, 0x62e: 0x39ee, 0x62f: 0x3b7d, + 0x630: 0x39fc, 0x631: 0x3b8b, 0x632: 0x3233, 0x633: 0x354e, 0x634: 0x325b, 0x635: 0x357b, + 0x636: 0x3256, 0x637: 0x3571, 0x638: 0x3242, 0x639: 0x355d, + // Block 0x19, offset 0x640 + 0x640: 0x4807, 0x641: 0x480d, 0x642: 0x4921, 0x643: 0x4939, 0x644: 0x4929, 0x645: 0x4941, + 0x646: 0x4931, 0x647: 0x4949, 0x648: 0x47ad, 0x649: 0x47b3, 0x64a: 0x4891, 0x64b: 0x48a9, + 0x64c: 0x4899, 0x64d: 0x48b1, 0x64e: 0x48a1, 0x64f: 0x48b9, 0x650: 0x4819, 0x651: 0x481f, + 0x652: 0x3dbb, 0x653: 0x3dcb, 0x654: 0x3dc3, 0x655: 0x3dd3, + 0x658: 0x47b9, 
0x659: 0x47bf, 0x65a: 0x3ceb, 0x65b: 0x3cfb, 0x65c: 0x3cf3, 0x65d: 0x3d03, + 0x660: 0x4831, 0x661: 0x4837, 0x662: 0x4951, 0x663: 0x4969, + 0x664: 0x4959, 0x665: 0x4971, 0x666: 0x4961, 0x667: 0x4979, 0x668: 0x47c5, 0x669: 0x47cb, + 0x66a: 0x48c1, 0x66b: 0x48d9, 0x66c: 0x48c9, 0x66d: 0x48e1, 0x66e: 0x48d1, 0x66f: 0x48e9, + 0x670: 0x4849, 0x671: 0x484f, 0x672: 0x3e1b, 0x673: 0x3e33, 0x674: 0x3e23, 0x675: 0x3e3b, + 0x676: 0x3e2b, 0x677: 0x3e43, 0x678: 0x47d1, 0x679: 0x47d7, 0x67a: 0x3d1b, 0x67b: 0x3d33, + 0x67c: 0x3d23, 0x67d: 0x3d3b, 0x67e: 0x3d2b, 0x67f: 0x3d43, + // Block 0x1a, offset 0x680 + 0x680: 0x4855, 0x681: 0x485b, 0x682: 0x3e4b, 0x683: 0x3e5b, 0x684: 0x3e53, 0x685: 0x3e63, + 0x688: 0x47dd, 0x689: 0x47e3, 0x68a: 0x3d4b, 0x68b: 0x3d5b, + 0x68c: 0x3d53, 0x68d: 0x3d63, 0x690: 0x4867, 0x691: 0x486d, + 0x692: 0x3e83, 0x693: 0x3e9b, 0x694: 0x3e8b, 0x695: 0x3ea3, 0x696: 0x3e93, 0x697: 0x3eab, + 0x699: 0x47e9, 0x69b: 0x3d6b, 0x69d: 0x3d73, + 0x69f: 0x3d7b, 0x6a0: 0x487f, 0x6a1: 0x4885, 0x6a2: 0x4981, 0x6a3: 0x4999, + 0x6a4: 0x4989, 0x6a5: 0x49a1, 0x6a6: 0x4991, 0x6a7: 0x49a9, 0x6a8: 0x47ef, 0x6a9: 0x47f5, + 0x6aa: 0x48f1, 0x6ab: 0x4909, 0x6ac: 0x48f9, 0x6ad: 0x4911, 0x6ae: 0x4901, 0x6af: 0x4919, + 0x6b0: 0x47fb, 0x6b1: 0x4321, 0x6b2: 0x3694, 0x6b3: 0x4327, 0x6b4: 0x4825, 0x6b5: 0x432d, + 0x6b6: 0x36a6, 0x6b7: 0x4333, 0x6b8: 0x36c4, 0x6b9: 0x4339, 0x6ba: 0x36dc, 0x6bb: 0x433f, + 0x6bc: 0x4873, 0x6bd: 0x4345, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3da3, 0x6c1: 0x3dab, 0x6c2: 0x4187, 0x6c3: 0x41a5, 0x6c4: 0x4191, 0x6c5: 0x41af, + 0x6c6: 0x419b, 0x6c7: 0x41b9, 0x6c8: 0x3cdb, 0x6c9: 0x3ce3, 0x6ca: 0x40d3, 0x6cb: 0x40f1, + 0x6cc: 0x40dd, 0x6cd: 0x40fb, 0x6ce: 0x40e7, 0x6cf: 0x4105, 0x6d0: 0x3deb, 0x6d1: 0x3df3, + 0x6d2: 0x41c3, 0x6d3: 0x41e1, 0x6d4: 0x41cd, 0x6d5: 0x41eb, 0x6d6: 0x41d7, 0x6d7: 0x41f5, + 0x6d8: 0x3d0b, 0x6d9: 0x3d13, 0x6da: 0x410f, 0x6db: 0x412d, 0x6dc: 0x4119, 0x6dd: 0x4137, + 0x6de: 0x4123, 0x6df: 0x4141, 0x6e0: 0x3ec3, 0x6e1: 0x3ecb, 0x6e2: 0x41ff, 0x6e3: 0x421d, + 0x6e4: 0x4209, 0x6e5: 0x4227, 0x6e6: 0x4213, 0x6e7: 0x4231, 0x6e8: 0x3d83, 0x6e9: 0x3d8b, + 0x6ea: 0x414b, 0x6eb: 0x4169, 0x6ec: 0x4155, 0x6ed: 0x4173, 0x6ee: 0x415f, 0x6ef: 0x417d, + 0x6f0: 0x3688, 0x6f1: 0x3682, 0x6f2: 0x3d93, 0x6f3: 0x368e, 0x6f4: 0x3d9b, + 0x6f6: 0x4813, 0x6f7: 0x3db3, 0x6f8: 0x35f8, 0x6f9: 0x35f2, 0x6fa: 0x35e6, 0x6fb: 0x42f1, + 0x6fc: 0x35fe, 0x6fd: 0x428a, 0x6fe: 0x01d3, 0x6ff: 0x428a, + // Block 0x1c, offset 0x700 + 0x700: 0x42a3, 0x701: 0x4485, 0x702: 0x3ddb, 0x703: 0x36a0, 0x704: 0x3de3, + 0x706: 0x483d, 0x707: 0x3dfb, 0x708: 0x3604, 0x709: 0x42f7, 0x70a: 0x3610, 0x70b: 0x42fd, + 0x70c: 0x361c, 0x70d: 0x448c, 0x70e: 0x4493, 0x70f: 0x449a, 0x710: 0x36b8, 0x711: 0x36b2, + 0x712: 0x3e03, 0x713: 0x44e7, 0x716: 0x36be, 0x717: 0x3e13, + 0x718: 0x3634, 0x719: 0x362e, 0x71a: 0x3622, 0x71b: 0x4303, 0x71d: 0x44a1, + 0x71e: 0x44a8, 0x71f: 0x44af, 0x720: 0x36ee, 0x721: 0x36e8, 0x722: 0x3e6b, 0x723: 0x44ef, + 0x724: 0x36d0, 0x725: 0x36d6, 0x726: 0x36f4, 0x727: 0x3e7b, 0x728: 0x3664, 0x729: 0x365e, + 0x72a: 0x3652, 0x72b: 0x430f, 0x72c: 0x364c, 0x72d: 0x4477, 0x72e: 0x447e, 0x72f: 0x0081, + 0x732: 0x3eb3, 0x733: 0x36fa, 0x734: 0x3ebb, + 0x736: 0x488b, 0x737: 0x3ed3, 0x738: 0x3640, 0x739: 0x4309, 0x73a: 0x3670, 0x73b: 0x431b, + 0x73c: 0x367c, 0x73d: 0x425d, 0x73e: 0x428f, + // Block 0x1d, offset 0x740 + 0x740: 0x1bdb, 0x741: 0x1bdf, 0x742: 0x0047, 0x743: 0x1c57, 0x745: 0x1beb, + 0x746: 0x1bef, 0x747: 0x00e9, 0x749: 0x1c5b, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 
0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1990, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x19a2, 0x761: 0x1bcb, 0x762: 0x19ab, + 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42d5, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215, + 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b9b, + 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0, + // Block 0x1e, offset 0x780 + 0x780: 0x0463, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x2231, 0x791: 0x223d, + 0x792: 0x22f1, 0x793: 0x2219, 0x794: 0x229d, 0x795: 0x2225, 0x796: 0x22a3, 0x797: 0x22bb, + 0x798: 0x22c7, 0x799: 0x222b, 0x79a: 0x22cd, 0x79b: 0x2237, 0x79c: 0x22c1, 0x79d: 0x22d3, + 0x79e: 0x22d9, 0x79f: 0x1cbf, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba7, 0x7a3: 0x1963, + 0x7a4: 0x006d, 0x7a5: 0x19ae, 0x7a6: 0x1bd3, 0x7a7: 0x1d4b, 0x7a8: 0x1966, 0x7a9: 0x0071, + 0x7aa: 0x19ba, 0x7ab: 0x1bd7, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19e7, 0x7b2: 0x1c1b, 0x7b3: 0x19f0, 0x7b4: 0x00ad, 0x7b5: 0x1a65, + 0x7b6: 0x1c4f, 0x7b7: 0x1d5f, 0x7b8: 0x19f3, 0x7b9: 0x00b1, 0x7ba: 0x1a68, 0x7bb: 0x1c53, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c09, 0x7c3: 0xa000, 0x7c4: 0x3c10, 0x7c5: 0xa000, + 0x7c7: 0x3c17, 0x7c8: 0xa000, 0x7c9: 0x3c1e, + 0x7cd: 0xa000, + 0x7e0: 0x2f68, 0x7e1: 0xa000, 0x7e2: 0x3c2c, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c25, 0x7ee: 0x2f63, 0x7ef: 0x2f6d, + 0x7f0: 0x3c33, 0x7f1: 0x3c3a, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c41, 0x7f5: 0x3c48, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4f, 0x7f9: 0x3c56, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c5d, 0x801: 0x3c64, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c79, 0x805: 0x3c80, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c87, 0x809: 0x3c8e, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3ca3, 0x82d: 0x3caa, 0x82e: 0x3cb1, 0x82f: 0x3cb8, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882, + 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894, + 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a6b, 0x875: 0x1a6f, + 0x876: 0x1a73, 0x877: 0x1a77, 0x878: 0x1a7b, 0x879: 0x1a7f, 0x87a: 0x1a83, 0x87b: 0x1a87, + 0x87c: 0x1a8b, 0x87d: 0x1c83, 0x87e: 0x1c88, 0x87f: 0x1c8d, + // Block 0x22, offset 0x880 + 0x880: 0x1c92, 0x881: 0x1c97, 0x882: 0x1c9c, 0x883: 0x1ca1, 0x884: 0x1ca6, 0x885: 0x1cab, + 0x886: 0x1cb0, 0x887: 0x1cb5, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb, + 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b63, + 0x892: 0x1b67, 0x893: 0x1b6b, 0x894: 0x1b6f, 0x895: 0x1b73, 0x896: 0x1b77, 0x897: 0x1b7b, + 0x898: 0x1b7f, 0x899: 0x1b83, 0x89a: 0x1b87, 0x89b: 0x1b8b, 0x89c: 0x1af7, 0x89d: 0x1afb, + 0x89e: 0x1aff, 0x89f: 0x1b03, 0x8a0: 0x1b07, 0x8a1: 0x1b0b, 0x8a2: 0x1b0f, 0x8a3: 0x1b13, + 0x8a4: 0x1b17, 0x8a5: 0x1b1b, 0x8a6: 0x1b1f, 0x8a7: 0x1b23, 0x8a8: 0x1b27, 0x8a9: 0x1b2b, + 0x8aa: 0x1b2f, 
0x8ab: 0x1b33, 0x8ac: 0x1b37, 0x8ad: 0x1b3b, 0x8ae: 0x1b3f, 0x8af: 0x1b43, + 0x8b0: 0x1b47, 0x8b1: 0x1b4b, 0x8b2: 0x1b4f, 0x8b3: 0x1b53, 0x8b4: 0x1b57, 0x8b5: 0x1b5b, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713, + 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab, + 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803, + 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887, + 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db, + 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb, + 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b, + 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7, + 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33, + 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63, + 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f, + // Block 0x24, offset 0x900 + 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb, + 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b, + 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb, + 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3, + 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f, + 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83, + 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7, + 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f, + 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf, + 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f, + 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187, + // Block 0x25, offset 0x940 + 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3, + 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb, + 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b, + 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b, + 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf, + 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f, + 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f, + 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503, + 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f, + 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f, + 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f0b, 0x98d: 0xa000, 0x98e: 0x3f13, 0x98f: 0xa000, 0x990: 0x3f1b, 0x991: 0xa000, + 0x992: 0x3f23, 0x993: 0xa000, 0x994: 
0x3f2b, 0x995: 0xa000, 0x996: 0x3f33, 0x997: 0xa000, + 0x998: 0x3f3b, 0x999: 0xa000, 0x99a: 0x3f43, 0x99b: 0xa000, 0x99c: 0x3f4b, 0x99d: 0xa000, + 0x99e: 0x3f53, 0x99f: 0xa000, 0x9a0: 0x3f5b, 0x9a1: 0xa000, 0x9a2: 0x3f63, + 0x9a4: 0xa000, 0x9a5: 0x3f6b, 0x9a6: 0xa000, 0x9a7: 0x3f73, 0x9a8: 0xa000, 0x9a9: 0x3f7b, + 0x9af: 0xa000, + 0x9b0: 0x3f83, 0x9b1: 0x3f8b, 0x9b2: 0xa000, 0x9b3: 0x3f93, 0x9b4: 0x3f9b, 0x9b5: 0xa000, + 0x9b6: 0x3fa3, 0x9b7: 0x3fab, 0x9b8: 0xa000, 0x9b9: 0x3fb3, 0x9ba: 0x3fbb, 0x9bb: 0xa000, + 0x9bc: 0x3fc3, 0x9bd: 0x3fcb, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f03, + 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42df, 0x9dc: 0x42e5, 0x9dd: 0xa000, + 0x9de: 0x3fd3, 0x9df: 0x26b7, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3fe3, 0x9ed: 0xa000, 0x9ee: 0x3feb, 0x9ef: 0xa000, + 0x9f0: 0x3ff3, 0x9f1: 0xa000, 0x9f2: 0x3ffb, 0x9f3: 0xa000, 0x9f4: 0x4003, 0x9f5: 0xa000, + 0x9f6: 0x400b, 0x9f7: 0xa000, 0x9f8: 0x4013, 0x9f9: 0xa000, 0x9fa: 0x401b, 0x9fb: 0xa000, + 0x9fc: 0x4023, 0x9fd: 0xa000, 0x9fe: 0x402b, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4033, 0xa01: 0xa000, 0xa02: 0x403b, 0xa04: 0xa000, 0xa05: 0x4043, + 0xa06: 0xa000, 0xa07: 0x404b, 0xa08: 0xa000, 0xa09: 0x4053, + 0xa0f: 0xa000, 0xa10: 0x405b, 0xa11: 0x4063, + 0xa12: 0xa000, 0xa13: 0x406b, 0xa14: 0x4073, 0xa15: 0xa000, 0xa16: 0x407b, 0xa17: 0x4083, + 0xa18: 0xa000, 0xa19: 0x408b, 0xa1a: 0x4093, 0xa1b: 0xa000, 0xa1c: 0x409b, 0xa1d: 0x40a3, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fdb, + 0xa37: 0x40ab, 0xa38: 0x40b3, 0xa39: 0x40bb, 0xa3a: 0x40c3, + 0xa3d: 0xa000, 0xa3e: 0x40cb, 0xa3f: 0x26cc, + // Block 0x29, offset 0xa40 + 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337, + 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f, + 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49c0, 0xa50: 0x49c6, 0xa51: 0x49cc, + 0xa52: 0x49d2, 0xa53: 0x49d8, 0xa54: 0x49de, 0xa55: 0x49e4, 0xa56: 0x49ea, 0xa57: 0x49f0, + 0xa58: 0x49f6, 0xa59: 0x49fc, 0xa5a: 0x4a02, 0xa5b: 0x4a08, 0xa5c: 0x4a0e, 0xa5d: 0x4a14, + 0xa5e: 0x4a1a, 0xa5f: 0x4a20, 0xa60: 0x4a26, 0xa61: 0x4a2c, 0xa62: 0x4a32, 0xa63: 0x4a38, + 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef, + 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403, + 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383, + 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b, + 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb, + 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7, + 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3, + 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7, + 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff, + 0xa9e: 0x098f, 0xa9f: 0x072f, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2057, 0xac1: 0x205d, 0xac2: 0x2063, 0xac3: 0x2069, 0xac4: 0x206f, 0xac5: 0x2075, + 0xac6: 0x207b, 0xac7: 0x2081, 0xac8: 0x2087, 0xac9: 0x208d, 0xaca: 0x2093, 0xacb: 0x2099, + 0xacc: 0x209f, 0xacd: 0x20a5, 0xace: 0x2729, 0xacf: 0x2732, 0xad0: 0x273b, 0xad1: 0x2744, + 0xad2: 0x274d, 0xad3: 0x2756, 0xad4: 0x275f, 0xad5: 0x2768, 0xad6: 0x2771, 0xad7: 0x2783, + 0xad8: 0x278c, 0xad9: 0x2795, 
0xada: 0x279e, 0xadb: 0x27a7, 0xadc: 0x277a, 0xadd: 0x2baf, + 0xade: 0x2af0, 0xae0: 0x20ab, 0xae1: 0x20c3, 0xae2: 0x20b7, 0xae3: 0x210b, + 0xae4: 0x20c9, 0xae5: 0x20e7, 0xae6: 0x20b1, 0xae7: 0x20e1, 0xae8: 0x20bd, 0xae9: 0x20f3, + 0xaea: 0x2123, 0xaeb: 0x2141, 0xaec: 0x213b, 0xaed: 0x212f, 0xaee: 0x217d, 0xaef: 0x2111, + 0xaf0: 0x211d, 0xaf1: 0x2135, 0xaf2: 0x2129, 0xaf3: 0x2153, 0xaf4: 0x20ff, 0xaf5: 0x2147, + 0xaf6: 0x2171, 0xaf7: 0x2159, 0xaf8: 0x20ed, 0xaf9: 0x20cf, 0xafa: 0x2105, 0xafb: 0x2117, + 0xafc: 0x214d, 0xafd: 0x20d5, 0xafe: 0x2177, 0xaff: 0x20f9, + // Block 0x2c, offset 0xb00 + 0xb00: 0x215f, 0xb01: 0x20db, 0xb02: 0x2165, 0xb03: 0x216b, 0xb04: 0x092f, 0xb05: 0x0b03, + 0xb06: 0x0ca7, 0xb07: 0x10c7, + 0xb10: 0x1bc7, 0xb11: 0x18a9, + 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb, + 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3, + 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327, + 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b, + 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e1b, 0xb2f: 0x2e23, + 0xb30: 0x2e2b, 0xb31: 0x2e33, 0xb32: 0x2e3b, 0xb33: 0x2e43, 0xb34: 0x2e4b, 0xb35: 0x2e53, + 0xb36: 0x2e63, 0xb37: 0x2e6b, 0xb38: 0x2e73, 0xb39: 0x2e7b, 0xb3a: 0x2e83, 0xb3b: 0x2e8b, + 0xb3c: 0x2ed6, 0xb3d: 0x2e9e, 0xb3e: 0x2e5b, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af, + 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f, + 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b, + 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f, + 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f, + 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b, + 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f, + 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b, + 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee, + 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900, + 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1cc9, 0xb81: 0x1cd8, 0xb82: 0x1ce7, 0xb83: 0x1cf6, 0xb84: 0x1d05, 0xb85: 0x1d14, + 0xb86: 0x1d23, 0xb87: 0x1d32, 0xb88: 0x1d41, 0xb89: 0x218f, 0xb8a: 0x21a1, 0xb8b: 0x21b3, + 0xb8c: 0x1954, 0xb8d: 0x1c07, 0xb8e: 0x19d5, 0xb8f: 0x1bab, 0xb90: 0x04cb, 0xb91: 0x04d3, + 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7, + 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f, + 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b, + 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543, + 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b, + 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f, + 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597, + 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b0f, 0xbc1: 0x29ab, 0xbc2: 0x2b1f, 0xbc3: 0x2883, 0xbc4: 
0x2ee7, 0xbc5: 0x288d, + 0xbc6: 0x2897, 0xbc7: 0x2f2b, 0xbc8: 0x29b8, 0xbc9: 0x28a1, 0xbca: 0x28ab, 0xbcb: 0x28b5, + 0xbcc: 0x29df, 0xbcd: 0x29ec, 0xbce: 0x29c5, 0xbcf: 0x29d2, 0xbd0: 0x2eac, 0xbd1: 0x29f9, + 0xbd2: 0x2a06, 0xbd3: 0x2bc1, 0xbd4: 0x26be, 0xbd5: 0x2bd4, 0xbd6: 0x2be7, 0xbd7: 0x2b2f, + 0xbd8: 0x2a13, 0xbd9: 0x2bfa, 0xbda: 0x2c0d, 0xbdb: 0x2a20, 0xbdc: 0x28bf, 0xbdd: 0x28c9, + 0xbde: 0x2eba, 0xbdf: 0x2a2d, 0xbe0: 0x2b3f, 0xbe1: 0x2ef8, 0xbe2: 0x28d3, 0xbe3: 0x28dd, + 0xbe4: 0x2a3a, 0xbe5: 0x28e7, 0xbe6: 0x28f1, 0xbe7: 0x26d3, 0xbe8: 0x26da, 0xbe9: 0x28fb, + 0xbea: 0x2905, 0xbeb: 0x2c20, 0xbec: 0x2a47, 0xbed: 0x2b4f, 0xbee: 0x2c33, 0xbef: 0x2a54, + 0xbf0: 0x2919, 0xbf1: 0x290f, 0xbf2: 0x2f3f, 0xbf3: 0x2a61, 0xbf4: 0x2c46, 0xbf5: 0x2923, + 0xbf6: 0x2b5f, 0xbf7: 0x292d, 0xbf8: 0x2a7b, 0xbf9: 0x2937, 0xbfa: 0x2a88, 0xbfb: 0x2f09, + 0xbfc: 0x2a6e, 0xbfd: 0x2b6f, 0xbfe: 0x2a95, 0xbff: 0x26e1, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f1a, 0xc01: 0x2941, 0xc02: 0x294b, 0xc03: 0x2aa2, 0xc04: 0x2955, 0xc05: 0x295f, + 0xc06: 0x2969, 0xc07: 0x2b7f, 0xc08: 0x2aaf, 0xc09: 0x26e8, 0xc0a: 0x2c59, 0xc0b: 0x2e93, + 0xc0c: 0x2b8f, 0xc0d: 0x2abc, 0xc0e: 0x2ec8, 0xc0f: 0x2973, 0xc10: 0x297d, 0xc11: 0x2ac9, + 0xc12: 0x26ef, 0xc13: 0x2ad6, 0xc14: 0x2b9f, 0xc15: 0x26f6, 0xc16: 0x2c6c, 0xc17: 0x2987, + 0xc18: 0x1cba, 0xc19: 0x1cce, 0xc1a: 0x1cdd, 0xc1b: 0x1cec, 0xc1c: 0x1cfb, 0xc1d: 0x1d0a, + 0xc1e: 0x1d19, 0xc1f: 0x1d28, 0xc20: 0x1d37, 0xc21: 0x1d46, 0xc22: 0x2195, 0xc23: 0x21a7, + 0xc24: 0x21b9, 0xc25: 0x21c5, 0xc26: 0x21d1, 0xc27: 0x21dd, 0xc28: 0x21e9, 0xc29: 0x21f5, + 0xc2a: 0x2201, 0xc2b: 0x220d, 0xc2c: 0x2249, 0xc2d: 0x2255, 0xc2e: 0x2261, 0xc2f: 0x226d, + 0xc30: 0x2279, 0xc31: 0x1c17, 0xc32: 0x19c9, 0xc33: 0x1936, 0xc34: 0x1be7, 0xc35: 0x1a4a, + 0xc36: 0x1a59, 0xc37: 0x19cf, 0xc38: 0x1bff, 0xc39: 0x1c03, 0xc3a: 0x1960, 0xc3b: 0x2704, + 0xc3c: 0x2712, 0xc3d: 0x26fd, 0xc3e: 0x270b, 0xc3f: 0x2ae3, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a4d, 0xc41: 0x1a35, 0xc42: 0x1c63, 0xc43: 0x1a1d, 0xc44: 0x19f6, 0xc45: 0x1969, + 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf3, 0xc49: 0x1d55, 0xc4a: 0x1a50, 0xc4b: 0x1a38, + 0xc4c: 0x1c67, 0xc4d: 0x1c73, 0xc4e: 0x1a29, 0xc4f: 0x19ff, 0xc50: 0x1957, 0xc51: 0x1c1f, + 0xc52: 0x1bb3, 0xc53: 0x1b9f, 0xc54: 0x1bcf, 0xc55: 0x1c77, 0xc56: 0x1a2c, 0xc57: 0x19cc, + 0xc58: 0x1a02, 0xc59: 0x19e1, 0xc5a: 0x1a44, 0xc5b: 0x1c7b, 0xc5c: 0x1a2f, 0xc5d: 0x19c3, + 0xc5e: 0x1a05, 0xc5f: 0x1c3f, 0xc60: 0x1bf7, 0xc61: 0x1a17, 0xc62: 0x1c27, 0xc63: 0x1c43, + 0xc64: 0x1bfb, 0xc65: 0x1a1a, 0xc66: 0x1c2b, 0xc67: 0x22eb, 0xc68: 0x22ff, 0xc69: 0x1999, + 0xc6a: 0x1c23, 0xc6b: 0x1bb7, 0xc6c: 0x1ba3, 0xc6d: 0x1c4b, 0xc6e: 0x2719, 0xc6f: 0x27b0, + 0xc70: 0x1a5c, 0xc71: 0x1a47, 0xc72: 0x1c7f, 0xc73: 0x1a32, 0xc74: 0x1a53, 0xc75: 0x1a3b, + 0xc76: 0x1c6b, 0xc77: 0x1a20, 0xc78: 0x19f9, 0xc79: 0x1984, 0xc7a: 0x1a56, 0xc7b: 0x1a3e, + 0xc7c: 0x1c6f, 0xc7d: 0x1a23, 0xc7e: 0x19fc, 0xc7f: 0x1987, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c2f, 0xc81: 0x1bbb, 0xc82: 0x1d50, 0xc83: 0x1939, 0xc84: 0x19bd, 0xc85: 0x19c0, + 0xc86: 0x22f8, 0xc87: 0x1b97, 0xc88: 0x19c6, 0xc89: 0x194b, 0xc8a: 0x19e4, 0xc8b: 0x194e, + 0xc8c: 0x19ed, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a08, 0xc90: 0x1a0e, 0xc91: 0x1a11, + 0xc92: 0x1c33, 0xc93: 0x1a14, 0xc94: 0x1a26, 0xc95: 0x1c3b, 0xc96: 0x1c47, 0xc97: 0x1993, + 0xc98: 0x1d5a, 0xc99: 0x1bbf, 0xc9a: 0x1996, 0xc9b: 0x1a5f, 0xc9c: 0x19a8, 0xc9d: 0x19b7, + 0xc9e: 0x22e5, 0xc9f: 0x22df, 0xca0: 0x1cc4, 0xca1: 0x1cd3, 0xca2: 0x1ce2, 0xca3: 0x1cf1, + 0xca4: 0x1d00, 0xca5: 0x1d0f, 
0xca6: 0x1d1e, 0xca7: 0x1d2d, 0xca8: 0x1d3c, 0xca9: 0x2189, + 0xcaa: 0x219b, 0xcab: 0x21ad, 0xcac: 0x21bf, 0xcad: 0x21cb, 0xcae: 0x21d7, 0xcaf: 0x21e3, + 0xcb0: 0x21ef, 0xcb1: 0x21fb, 0xcb2: 0x2207, 0xcb3: 0x2243, 0xcb4: 0x224f, 0xcb5: 0x225b, + 0xcb6: 0x2267, 0xcb7: 0x2273, 0xcb8: 0x227f, 0xcb9: 0x2285, 0xcba: 0x228b, 0xcbb: 0x2291, + 0xcbc: 0x2297, 0xcbd: 0x22a9, 0xcbe: 0x22af, 0xcbf: 0x1c13, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb, + 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943, + 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3, + 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43, + 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87, + 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283, + 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f, + 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853, + 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b, + 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b, + 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b, + // Block 0x34, offset 0xd00 + 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b, + 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f, + 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7, + 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127, + 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357, + 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873, + 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3, + 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b, + 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57, + 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb, + 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b, + // Block 0x35, offset 0xd40 + 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f, + 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3, + 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83, + 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193, + 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b, + 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b, + 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f, + 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b, + 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753, + 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777, + 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 
0x0ce3, + 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47, + 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af, + 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df, + 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817, + 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3, + 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457, + 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b, + 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27, + 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f, + 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03, + 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27, + 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af, + 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3, + 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb, + 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353, + 0xde5: 0x1407, 0xde6: 0x1433, + 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7, + 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897, + 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93, + 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3, + // Block 0x38, offset 0xe00 + 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b, + 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f, + 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f, + 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f, + 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff, + 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f, + 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f, + 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3, + 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7, + 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963, + 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b, + 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb, + 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf, + 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f, + 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013, + 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f, + 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b, + 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b, + 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 
0xe75: 0x12cb, + 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343, + 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b, + 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b, + 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2, + 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809, + 0xe98: 0x1617, 0xe99: 0x1627, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19d8, 0xec1: 0x19db, 0xec2: 0x19de, 0xec3: 0x1c0b, 0xec4: 0x1c0f, 0xec5: 0x1a62, + 0xec6: 0x1a62, + 0xed3: 0x1d78, 0xed4: 0x1d69, 0xed5: 0x1d6e, 0xed6: 0x1d7d, 0xed7: 0x1d73, + 0xedd: 0x4393, + 0xede: 0x8115, 0xedf: 0x4405, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221, + 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017, + 0xeea: 0x43f3, 0xeeb: 0x43f9, 0xeec: 0x44f7, 0xeed: 0x44ff, 0xeee: 0x434b, 0xeef: 0x4351, + 0xef0: 0x4357, 0xef1: 0x435d, 0xef2: 0x4369, 0xef3: 0x436f, 0xef4: 0x4375, 0xef5: 0x4381, + 0xef6: 0x4387, 0xef8: 0x438d, 0xef9: 0x4399, 0xefa: 0x439f, 0xefb: 0x43a5, + 0xefc: 0x43b1, 0xefe: 0x43b7, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43bd, 0xf01: 0x43c3, 0xf03: 0x43c9, 0xf04: 0x43cf, + 0xf06: 0x43db, 0xf07: 0x43e1, 0xf08: 0x43e7, 0xf09: 0x43ed, 0xf0a: 0x43ff, 0xf0b: 0x437b, + 0xf0c: 0x4363, 0xf0d: 0x43ab, 0xf0e: 0x43d5, 0xf0f: 0x1d82, 0xf10: 0x0299, 0xf11: 0x0299, + 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5, + 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab, + 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8, + 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c, + 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2, + 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1, + 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4, + 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd, + 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9, + 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5, + 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1, + 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de, + 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7, + 0xf64: 0x4471, 0xf65: 0x4471, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed, + 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308, + 0xf70: 0x446b, 0xf71: 0x446b, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6, + 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x2052, + 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc, + 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d, + 0xfaa: 0x25ad, 0xfab: 0x25ad, 0xfac: 0x261d, 0xfad: 0x261d, 0xfae: 0x25ec, 
0xfaf: 0x25ec, + 0xfb0: 0x2608, 0xfb1: 0x2608, 0xfb2: 0x2601, 0xfb3: 0x2601, 0xfb4: 0x260f, 0xfb5: 0x260f, + 0xfb6: 0x2616, 0xfb7: 0x2616, 0xfb8: 0x2616, 0xfb9: 0x25f3, 0xfba: 0x25f3, 0xfbb: 0x25f3, + 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b4, 0xfc1: 0x25bb, 0xfc2: 0x25d7, 0xfc3: 0x25f3, 0xfc4: 0x25fa, 0xfc5: 0x1d8c, + 0xfc6: 0x1d91, 0xfc7: 0x1d96, 0xfc8: 0x1da5, 0xfc9: 0x1db4, 0xfca: 0x1db9, 0xfcb: 0x1dbe, + 0xfcc: 0x1dc3, 0xfcd: 0x1dc8, 0xfce: 0x1dd7, 0xfcf: 0x1de6, 0xfd0: 0x1deb, 0xfd1: 0x1df0, + 0xfd2: 0x1dff, 0xfd3: 0x1e0e, 0xfd4: 0x1e13, 0xfd5: 0x1e18, 0xfd6: 0x1e1d, 0xfd7: 0x1e2c, + 0xfd8: 0x1e31, 0xfd9: 0x1e40, 0xfda: 0x1e45, 0xfdb: 0x1e4a, 0xfdc: 0x1e59, 0xfdd: 0x1e5e, + 0xfde: 0x1e63, 0xfdf: 0x1e6d, 0xfe0: 0x1ea9, 0xfe1: 0x1eb8, 0xfe2: 0x1ec7, 0xfe3: 0x1ecc, + 0xfe4: 0x1ed1, 0xfe5: 0x1edb, 0xfe6: 0x1eea, 0xfe7: 0x1eef, 0xfe8: 0x1efe, 0xfe9: 0x1f03, + 0xfea: 0x1f08, 0xfeb: 0x1f17, 0xfec: 0x1f1c, 0xfed: 0x1f2b, 0xfee: 0x1f30, 0xfef: 0x1f35, + 0xff0: 0x1f3a, 0xff1: 0x1f3f, 0xff2: 0x1f44, 0xff3: 0x1f49, 0xff4: 0x1f4e, 0xff5: 0x1f53, + 0xff6: 0x1f58, 0xff7: 0x1f5d, 0xff8: 0x1f62, 0xff9: 0x1f67, 0xffa: 0x1f6c, 0xffb: 0x1f71, + 0xffc: 0x1f76, 0xffd: 0x1f7b, 0xffe: 0x1f80, 0xfff: 0x1f8a, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f8f, 0x1001: 0x1f94, 0x1002: 0x1f99, 0x1003: 0x1fa3, 0x1004: 0x1fa8, 0x1005: 0x1fb2, + 0x1006: 0x1fb7, 0x1007: 0x1fbc, 0x1008: 0x1fc1, 0x1009: 0x1fc6, 0x100a: 0x1fcb, 0x100b: 0x1fd0, + 0x100c: 0x1fd5, 0x100d: 0x1fda, 0x100e: 0x1fe9, 0x100f: 0x1ff8, 0x1010: 0x1ffd, 0x1011: 0x2002, + 0x1012: 0x2007, 0x1013: 0x200c, 0x1014: 0x2011, 0x1015: 0x201b, 0x1016: 0x2020, 0x1017: 0x2025, + 0x1018: 0x2034, 0x1019: 0x2043, 0x101a: 0x2048, 0x101b: 0x4423, 0x101c: 0x4429, 0x101d: 0x445f, + 0x101e: 0x44b6, 0x101f: 0x44bd, 0x1020: 0x44c4, 0x1021: 0x44cb, 0x1022: 0x44d2, 0x1023: 0x44d9, + 0x1024: 0x25c9, 0x1025: 0x25d0, 0x1026: 0x25d7, 0x1027: 0x25de, 0x1028: 0x25f3, 0x1029: 0x25fa, + 0x102a: 0x1d9b, 0x102b: 0x1da0, 0x102c: 0x1da5, 0x102d: 0x1daa, 0x102e: 0x1db4, 0x102f: 0x1db9, + 0x1030: 0x1dcd, 0x1031: 0x1dd2, 0x1032: 0x1dd7, 0x1033: 0x1ddc, 0x1034: 0x1de6, 0x1035: 0x1deb, + 0x1036: 0x1df5, 0x1037: 0x1dfa, 0x1038: 0x1dff, 0x1039: 0x1e04, 0x103a: 0x1e0e, 0x103b: 0x1e13, + 0x103c: 0x1f3f, 0x103d: 0x1f44, 0x103e: 0x1f53, 0x103f: 0x1f58, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f5d, 0x1041: 0x1f71, 0x1042: 0x1f76, 0x1043: 0x1f7b, 0x1044: 0x1f80, 0x1045: 0x1f99, + 0x1046: 0x1fa3, 0x1047: 0x1fa8, 0x1048: 0x1fad, 0x1049: 0x1fc1, 0x104a: 0x1fdf, 0x104b: 0x1fe4, + 0x104c: 0x1fe9, 0x104d: 0x1fee, 0x104e: 0x1ff8, 0x104f: 0x1ffd, 0x1050: 0x445f, 0x1051: 0x202a, + 0x1052: 0x202f, 0x1053: 0x2034, 0x1054: 0x2039, 0x1055: 0x2043, 0x1056: 0x2048, 0x1057: 0x25b4, + 0x1058: 0x25bb, 0x1059: 0x25c2, 0x105a: 0x25d7, 0x105b: 0x25e5, 0x105c: 0x1d8c, 0x105d: 0x1d91, + 0x105e: 0x1d96, 0x105f: 0x1da5, 0x1060: 0x1daf, 0x1061: 0x1dbe, 0x1062: 0x1dc3, 0x1063: 0x1dc8, + 0x1064: 0x1dd7, 0x1065: 0x1de1, 0x1066: 0x1dff, 0x1067: 0x1e18, 0x1068: 0x1e1d, 0x1069: 0x1e2c, + 0x106a: 0x1e31, 0x106b: 0x1e40, 0x106c: 0x1e4a, 0x106d: 0x1e59, 0x106e: 0x1e5e, 0x106f: 0x1e63, + 0x1070: 0x1e6d, 0x1071: 0x1ea9, 0x1072: 0x1eae, 0x1073: 0x1eb8, 0x1074: 0x1ec7, 0x1075: 0x1ecc, + 0x1076: 0x1ed1, 0x1077: 0x1edb, 0x1078: 0x1eea, 0x1079: 0x1efe, 0x107a: 0x1f03, 0x107b: 0x1f08, + 0x107c: 0x1f17, 0x107d: 0x1f1c, 0x107e: 0x1f2b, 0x107f: 0x1f30, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f35, 0x1081: 0x1f3a, 0x1082: 0x1f49, 0x1083: 0x1f4e, 0x1084: 0x1f62, 0x1085: 
0x1f67, + 0x1086: 0x1f6c, 0x1087: 0x1f71, 0x1088: 0x1f76, 0x1089: 0x1f8a, 0x108a: 0x1f8f, 0x108b: 0x1f94, + 0x108c: 0x1f99, 0x108d: 0x1f9e, 0x108e: 0x1fb2, 0x108f: 0x1fb7, 0x1090: 0x1fbc, 0x1091: 0x1fc1, + 0x1092: 0x1fd0, 0x1093: 0x1fd5, 0x1094: 0x1fda, 0x1095: 0x1fe9, 0x1096: 0x1ff3, 0x1097: 0x2002, + 0x1098: 0x2007, 0x1099: 0x4453, 0x109a: 0x201b, 0x109b: 0x2020, 0x109c: 0x2025, 0x109d: 0x2034, + 0x109e: 0x203e, 0x109f: 0x25d7, 0x10a0: 0x25e5, 0x10a1: 0x1da5, 0x10a2: 0x1daf, 0x10a3: 0x1dd7, + 0x10a4: 0x1de1, 0x10a5: 0x1dff, 0x10a6: 0x1e09, 0x10a7: 0x1e6d, 0x10a8: 0x1e72, 0x10a9: 0x1e95, + 0x10aa: 0x1e9a, 0x10ab: 0x1f71, 0x10ac: 0x1f76, 0x10ad: 0x1f99, 0x10ae: 0x1fe9, 0x10af: 0x1ff3, + 0x10b0: 0x2034, 0x10b1: 0x203e, 0x10b2: 0x4507, 0x10b3: 0x450f, 0x10b4: 0x4517, 0x10b5: 0x1ef4, + 0x10b6: 0x1ef9, 0x10b7: 0x1f0d, 0x10b8: 0x1f12, 0x10b9: 0x1f21, 0x10ba: 0x1f26, 0x10bb: 0x1e77, + 0x10bc: 0x1e7c, 0x10bd: 0x1e9f, 0x10be: 0x1ea4, 0x10bf: 0x1e36, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e3b, 0x10c1: 0x1e22, 0x10c2: 0x1e27, 0x10c3: 0x1e4f, 0x10c4: 0x1e54, 0x10c5: 0x1ebd, + 0x10c6: 0x1ec2, 0x10c7: 0x1ee0, 0x10c8: 0x1ee5, 0x10c9: 0x1e81, 0x10ca: 0x1e86, 0x10cb: 0x1e8b, + 0x10cc: 0x1e95, 0x10cd: 0x1e90, 0x10ce: 0x1e68, 0x10cf: 0x1eb3, 0x10d0: 0x1ed6, 0x10d1: 0x1ef4, + 0x10d2: 0x1ef9, 0x10d3: 0x1f0d, 0x10d4: 0x1f12, 0x10d5: 0x1f21, 0x10d6: 0x1f26, 0x10d7: 0x1e77, + 0x10d8: 0x1e7c, 0x10d9: 0x1e9f, 0x10da: 0x1ea4, 0x10db: 0x1e36, 0x10dc: 0x1e3b, 0x10dd: 0x1e22, + 0x10de: 0x1e27, 0x10df: 0x1e4f, 0x10e0: 0x1e54, 0x10e1: 0x1ebd, 0x10e2: 0x1ec2, 0x10e3: 0x1ee0, + 0x10e4: 0x1ee5, 0x10e5: 0x1e81, 0x10e6: 0x1e86, 0x10e7: 0x1e8b, 0x10e8: 0x1e95, 0x10e9: 0x1e90, + 0x10ea: 0x1e68, 0x10eb: 0x1eb3, 0x10ec: 0x1ed6, 0x10ed: 0x1e81, 0x10ee: 0x1e86, 0x10ef: 0x1e8b, + 0x10f0: 0x1e95, 0x10f1: 0x1e72, 0x10f2: 0x1e9a, 0x10f3: 0x1eef, 0x10f4: 0x1e59, 0x10f5: 0x1e5e, + 0x10f6: 0x1e63, 0x10f7: 0x1e81, 0x10f8: 0x1e86, 0x10f9: 0x1e8b, 0x10fa: 0x1eef, 0x10fb: 0x1efe, + 0x10fc: 0x440b, 0x10fd: 0x440b, + // Block 0x44, offset 0x1100 + 0x1110: 0x2314, 0x1111: 0x2329, + 0x1112: 0x2329, 0x1113: 0x2330, 0x1114: 0x2337, 0x1115: 0x234c, 0x1116: 0x2353, 0x1117: 0x235a, + 0x1118: 0x237d, 0x1119: 0x237d, 0x111a: 0x23a0, 0x111b: 0x2399, 0x111c: 0x23b5, 0x111d: 0x23a7, + 0x111e: 0x23ae, 0x111f: 0x23d1, 0x1120: 0x23d1, 0x1121: 0x23ca, 0x1122: 0x23d8, 0x1123: 0x23d8, + 0x1124: 0x2402, 0x1125: 0x2402, 0x1126: 0x241e, 0x1127: 0x23e6, 0x1128: 0x23e6, 0x1129: 0x23df, + 0x112a: 0x23f4, 0x112b: 0x23f4, 0x112c: 0x23fb, 0x112d: 0x23fb, 0x112e: 0x2425, 0x112f: 0x2433, + 0x1130: 0x2433, 0x1131: 0x243a, 0x1132: 0x243a, 0x1133: 0x2441, 0x1134: 0x2448, 0x1135: 0x244f, + 0x1136: 0x2456, 0x1137: 0x2456, 0x1138: 0x245d, 0x1139: 0x246b, 0x113a: 0x2479, 0x113b: 0x2472, + 0x113c: 0x2480, 0x113d: 0x2480, 0x113e: 0x2495, 0x113f: 0x249c, + // Block 0x45, offset 0x1140 + 0x1140: 0x24cd, 0x1141: 0x24db, 0x1142: 0x24d4, 0x1143: 0x24b8, 0x1144: 0x24b8, 0x1145: 0x24e2, + 0x1146: 0x24e2, 0x1147: 0x24e9, 0x1148: 0x24e9, 0x1149: 0x2513, 0x114a: 0x251a, 0x114b: 0x2521, + 0x114c: 0x24f7, 0x114d: 0x2505, 0x114e: 0x2528, 0x114f: 0x252f, + 0x1152: 0x24fe, 0x1153: 0x2583, 0x1154: 0x258a, 0x1155: 0x2560, 0x1156: 0x2567, 0x1157: 0x254b, + 0x1158: 0x254b, 0x1159: 0x2552, 0x115a: 0x257c, 0x115b: 0x2575, 0x115c: 0x259f, 0x115d: 0x259f, + 0x115e: 0x230d, 0x115f: 0x2322, 0x1160: 0x231b, 0x1161: 0x2345, 0x1162: 0x233e, 0x1163: 0x2368, + 0x1164: 0x2361, 0x1165: 0x238b, 0x1166: 0x236f, 0x1167: 0x2384, 0x1168: 0x23bc, 0x1169: 0x2409, + 0x116a: 0x23ed, 0x116b: 0x242c, 0x116c: 0x24c6, 
0x116d: 0x24f0, 0x116e: 0x2598, 0x116f: 0x2591, + 0x1170: 0x25a6, 0x1171: 0x253d, 0x1172: 0x24a3, 0x1173: 0x256e, 0x1174: 0x2495, 0x1175: 0x24cd, + 0x1176: 0x2464, 0x1177: 0x24b1, 0x1178: 0x2544, 0x1179: 0x2536, 0x117a: 0x24bf, 0x117b: 0x24aa, + 0x117c: 0x24bf, 0x117d: 0x2544, 0x117e: 0x2376, 0x117f: 0x2392, + // Block 0x46, offset 0x1180 + 0x1180: 0x250c, 0x1181: 0x2487, 0x1182: 0x2306, 0x1183: 0x24aa, 0x1184: 0x244f, 0x1185: 0x241e, + 0x1186: 0x23c3, 0x1187: 0x2559, + 0x11b0: 0x2417, 0x11b1: 0x248e, 0x11b2: 0x27c2, 0x11b3: 0x27b9, 0x11b4: 0x27ef, 0x11b5: 0x27dd, + 0x11b6: 0x27cb, 0x11b7: 0x27e6, 0x11b8: 0x27f8, 0x11b9: 0x2410, 0x11ba: 0x2c7f, 0x11bb: 0x2aff, + 0x11bc: 0x27d4, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0483, + 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf, + 0x11d8: 0x04c3, 0x11d9: 0x1b5f, + 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132, + 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d, + 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132, + 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab, + 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b, + // Block 0x48, offset 0x1200 + 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x426c, 0x120a: 0x426c, 0x120b: 0x426c, + 0x120c: 0x426c, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7, + 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42ad, 0x1231: 0x442f, 0x1232: 0x42b2, 0x1234: 0x42b7, + 0x1236: 0x42bc, 0x1237: 0x4435, 0x1238: 0x42c1, 0x1239: 0x443b, 0x123a: 0x42c6, 0x123b: 0x4441, + 0x123c: 0x42cb, 0x123d: 0x4447, 0x123e: 0x42d0, 0x123f: 0x444d, + // Block 0x49, offset 0x1240 + 0x1240: 0x0236, 0x1241: 0x4411, 0x1242: 0x4411, 0x1243: 0x4417, 0x1244: 0x4417, 0x1245: 0x4459, + 0x1246: 0x4459, 0x1247: 0x441d, 0x1248: 0x441d, 0x1249: 0x4465, 0x124a: 0x4465, 0x124b: 0x4465, + 0x124c: 0x4465, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c, + 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242, + 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248, + 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b, + 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251, + 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a, + 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260, + 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263, + 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c, + 0x1286: 0x026c, 
0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f, + 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275, + 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278, + 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e, + 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281, + 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287, + 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d, + 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e03, + 0x12b6: 0x2e03, 0x12b7: 0x2e0b, 0x12b8: 0x2e0b, 0x12b9: 0x2e13, 0x12ba: 0x2e13, 0x12bb: 0x1f85, + 0x12bc: 0x1f85, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f, + 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7, + 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f, + 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb, + 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503, + 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f, + 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547, + 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f, + 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583, + 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7, + 0x131e: 0x4a7b, 0x131f: 0x4a81, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3e, + 0x1324: 0x031b, 0x1325: 0x4a44, 0x1326: 0x4a4a, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327, + 0x132a: 0x4a50, 0x132b: 0x4a56, 0x132c: 0x4a5c, 0x132d: 0x4a62, 0x132e: 0x4a68, 0x132f: 0x4a6e, + 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337, + 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f, + 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49c0, 0x1343: 0x49c6, 0x1344: 0x49cc, 0x1345: 0x49d2, + 0x1346: 0x49d8, 0x1347: 0x49de, 0x134a: 0x49e4, 0x134b: 0x49ea, + 0x134c: 0x49f0, 0x134d: 0x49f6, 0x134e: 0x49fc, 0x134f: 0x4a02, + 0x1352: 0x4a08, 0x1353: 0x4a0e, 0x1354: 0x4a14, 0x1355: 0x4a1a, 0x1356: 0x4a20, 0x1357: 0x4a26, + 0x135a: 0x4a2c, 0x135b: 0x4a32, 0x135c: 0x4a38, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4267, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b, + 0x136a: 
0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176, + 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188, + 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a, + 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9, + 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0, + 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a, + 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e, + 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e, + 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4, + 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248, + 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290, + 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272, + // Block 0x51, offset 0x1440 + 0x1442: 0x0248, + 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e, + 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263, + 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e, + 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4, + 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248, + 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290, + 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f, + 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 
0x0260, 0x1475: 0x0242, + 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272, + 0x147c: 0x0293, 0x147e: 0x02cc, + // Block 0x52, offset 0x1480 + 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a, + 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e, + 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263, + 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e, + 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272, + 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251, + 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290, + 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f, + 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242, + 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8, + 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927, + 0x14d0: 0x1a8f, 0x14d1: 0x1a93, + 0x14d2: 0x1a97, 0x14d3: 0x1a9b, 0x14d4: 0x1a9f, 0x14d5: 0x1aa3, 0x14d6: 0x1aa7, 0x14d7: 0x1aab, + 0x14d8: 0x1aaf, 0x14d9: 0x1ab3, 0x14da: 0x1ab7, 0x14db: 0x1abb, 0x14dc: 0x1abf, 0x14dd: 0x1ac3, + 0x14de: 0x1ac7, 0x14df: 0x1acb, 0x14e0: 0x1acf, 0x14e1: 0x1ad3, 0x14e2: 0x1ad7, 0x14e3: 0x1adb, + 0x14e4: 0x1adf, 0x14e5: 0x1ae3, 0x14e6: 0x1ae7, 0x14e7: 0x1aeb, 0x14e8: 0x1aef, 0x14e9: 0x1af3, + 0x14ea: 0x2721, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b4, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26b0, 0x1501: 0x26c5, 0x1502: 0x0503, + 0x1510: 0x0c0f, 0x1511: 0x0a47, + 0x1512: 0x08d3, 0x1513: 0x45c7, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff, + 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b, + 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b, + 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf, + 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b, + 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43, + 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757, + 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773, + 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3, + 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf, + 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff, + 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f, + 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867, + 0x156a: 0x086b, 0x156b: 
0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af, + 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93, + 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3, + 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917, + // Block 0x56, offset 0x1580 + 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f, + 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983, + 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf, + 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3, + 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef, + 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23, + 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37, + 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63, + 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f, + 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692, + 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb, + 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f, + 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6, + 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9, + 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83, + 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3, + 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf, + 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7, + 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f, + 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b, + 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87, + 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb, + 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7, + 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663, + 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd, + 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7, + 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b, + 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f, + 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7, + 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700, + 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 
0x163f: 0x0dfb, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23, + 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53, + 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714, + 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b, + 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719, + 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728, + 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37, + 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57, + 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737, + 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741, + 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff, + 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637, + 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f, + 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093, + 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782, + 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3, + 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7, + 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133, + 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa, + 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4, + 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7, + 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7, + 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b, + 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd, + 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f, + 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f, + 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273, + 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677, + 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7, + 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb, + 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f, + // Block 0x5c, offset 0x1700 + 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5, + 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa, + 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 
0x134f, 0x1711: 0x136b, + 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7, + 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665, + 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f, + 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477, + 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693, + 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb, + 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b, + 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567, + 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7, + 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7, + 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef, + 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94, + 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 
0x21b: 0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + 0x374: 0xcb, + 0x37d: 0xcc, + // Block 0xe, offset 0x380 + 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0, + 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4, + 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9, + 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc, + 0x3a0: 0xdd, 0x3a7: 0xde, + 0x3a8: 0xdf, 0x3a9: 0xe0, 0x3aa: 0xe1, + 0x3b0: 0xda, 0x3b5: 0xe2, 0x3b6: 0xe3, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe4, 0x3ec: 0xe5, + // Block 0x10, 
offset 0x400 + 0x432: 0xe6, + // Block 0x11, offset 0x440 + 0x445: 0xe7, 0x446: 0xe8, 0x447: 0xe9, + 0x449: 0xea, + 0x450: 0xeb, 0x451: 0xec, 0x452: 0xed, 0x453: 0xee, 0x454: 0xef, 0x455: 0xf0, 0x456: 0xf1, 0x457: 0xf2, + 0x458: 0xf3, 0x459: 0xf4, 0x45a: 0x4c, 0x45b: 0xf5, 0x45c: 0xf6, 0x45d: 0xf7, 0x45e: 0xf8, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xf9, 0x484: 0xe5, + 0x48b: 0xfa, + 0x4a3: 0xfb, 0x4a5: 0xfc, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0xfd, 0x4c6: 0xfe, + 0x4c8: 0x52, 0x4c9: 0xff, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 164 entries, 328 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xdf, 0xe3, 0xe9, 0xfa, 0x106, 0x108, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x118, 0x11a, 0x11c, 0x11f, 0x122, 0x124, 0x127, 0x12a, 0x12e, 0x133, 0x13c, 0x13e, 0x141, 0x143, 0x14e, 0x159, 0x167, 0x175, 0x185, 0x193, 0x19a, 0x1a0, 0x1af, 0x1b3, 0x1b5, 0x1b9, 0x1bb, 0x1be, 0x1c0, 0x1c3, 0x1c5, 0x1c8, 0x1ca, 0x1cc, 0x1ce, 0x1da, 0x1e4, 0x1ee, 0x1f1, 0x1f5, 0x1f7, 0x1f9, 0x1fb, 0x1fd, 0x200, 0x202, 0x204, 0x206, 0x208, 0x20e, 0x211, 0x215, 0x217, 0x21e, 0x224, 0x22a, 0x232, 0x238, 0x23e, 0x244, 0x248, 0x24a, 0x24c, 0x24e, 0x250, 0x256, 0x259, 0x25b, 0x261, 0x264, 0x26c, 0x273, 0x276, 0x279, 0x27b, 0x27e, 0x286, 0x28a, 0x291, 0x294, 0x29a, 0x29c, 0x29e, 0x2a1, 0x2a3, 0x2a6, 0x2a8, 0x2aa, 0x2ac, 0x2ae, 0x2b1, 0x2b3, 0x2b5, 0x2b7, 0x2b9, 0x2c6, 0x2d0, 0x2d2, 0x2d4, 0x2d8, 0x2dd, 0x2e9, 0x2ee, 0x2f7, 0x2fd, 0x302, 0x306, 0x30b, 0x30f, 0x31f, 0x32d, 0x33b, 0x349, 0x34f, 0x351, 0x353, 0x356, 0x361, 0x363} + +// nfkcSparseValues: 877 entries, 3508 bytes +var nfkcSparseValues = [877]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x427b, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4267, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425d, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4294, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221f, lo: 0xbc, hi: 0xbc}, + {value: 0x2213, lo: 0xbd, hi: 0xbd}, + {value: 0x22b5, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e5, lo: 0xa0, hi: 0xa1}, + {value: 0x4717, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x4271, lo: 0x98, hi: 0x98}, + {value: 0x4276, lo: 0x99, hi: 0x9a}, + {value: 0x4299, lo: 0x9b, hi: 0x9b}, + {value: 0x4262, lo: 0x9c, hi: 0x9c}, + {value: 0x4285, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, 
lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a8, lo: 0x90, hi: 0x90}, + {value: 0x37b4, lo: 0x91, hi: 0x91}, + {value: 0x37a2, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x381a, lo: 0x97, hi: 0x97}, + {value: 0x37e4, lo: 0x9c, hi: 0x9c}, + {value: 0x37cc, lo: 0x9d, hi: 0x9d}, + {value: 0x37f6, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3820, lo: 0xb6, hi: 0xb6}, + {value: 0x3826, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3844, lo: 0xa2, hi: 0xa2}, + {value: 0x384a, lo: 0xa3, hi: 0xa3}, + {value: 0x3856, lo: 0xa4, hi: 0xa4}, + {value: 0x3850, lo: 0xa5, hi: 0xa5}, + {value: 0x385c, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386e, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3862, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3868, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, 
lo: 0xa8, hi: 0xa8}, + {value: 0x3edb, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee3, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eeb, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451f, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ca1, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455f, lo: 0x9c, hi: 0x9d}, + {value: 0x456f, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x4597, lo: 0xb3, hi: 0xb3}, + {value: 0x459f, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4577, lo: 0x99, hi: 0x9b}, + {value: 0x458f, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb9, lo: 0x88, hi: 0x88}, + {value: 0x2cb1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cc1, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a7, lo: 0x9c, hi: 0x9c}, + {value: 0x45af, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc9, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cd1, lo: 0x8a, hi: 0x8a}, + {value: 0x2ce1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd9, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef3, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce9, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cf1, lo: 0x87, hi: 0x87}, + {value: 0x2cf9, lo: 0x88, hi: 0x88}, + {value: 0x2f53, lo: 0x8a, hi: 0x8a}, + {value: 0x2ddb, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d01, lo: 0x8a, hi: 0x8a}, + {value: 0x2d11, lo: 0x8b, hi: 0x8b}, + {value: 0x2d09, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 
0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + {value: 0x6be7, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3efb, lo: 0x9a, hi: 0x9a}, + {value: 0x2f5b, lo: 0x9c, hi: 0x9c}, + {value: 0x2de6, lo: 0x9d, hi: 0x9d}, + {value: 0x2d19, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2624, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x03}, + {value: 0x2639, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xdf + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x262b, lo: 0x9c, hi: 0x9c}, + {value: 0x2632, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe3 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe9 + {value: 0x0000, lo: 0x10}, + {value: 0x2647, lo: 0x83, hi: 0x83}, + {value: 0x264e, lo: 0x8d, hi: 0x8d}, + {value: 0x2655, lo: 0x92, hi: 0x92}, + {value: 0x265c, lo: 0x97, hi: 0x97}, + {value: 0x2663, lo: 0x9c, hi: 0x9c}, + {value: 0x2640, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a87, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a90, lo: 0xb5, hi: 0xb5}, + {value: 0x45b7, lo: 0xb6, hi: 0xb6}, + {value: 0x45f7, lo: 0xb7, hi: 0xb7}, + {value: 0x45bf, lo: 0xb8, hi: 0xb8}, + {value: 0x4602, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xfa + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a99, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x2671, lo: 0x93, hi: 0x93}, + {value: 0x2678, lo: 0x9d, hi: 0x9d}, + {value: 0x267f, lo: 0xa2, hi: 0xa2}, + {value: 0x2686, lo: 0xa7, hi: 0xa7}, + {value: 0x268d, lo: 0xac, hi: 0xac}, + {value: 0x266a, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x108 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d21, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x116 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x118 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x11a + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 
0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11f + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x122 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x124 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x127 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x12a + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x133 + {value: 0x0000, lo: 0x08}, + {value: 0x2d69, lo: 0x80, hi: 0x80}, + {value: 0x2d71, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d79, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x13c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x13e + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x141 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x143 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x14e + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x159 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429e, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5f, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2694, lo: 0xb3, hi: 0xb3}, + {value: 0x2801, lo: 0xb4, hi: 0xb4}, + {value: 0x269b, lo: 0xb6, hi: 0xb6}, + {value: 0x280b, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x426c, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x167 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x2991, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + 
{value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x175 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x199c, lo: 0xa8, hi: 0xa8}, + // Block 0x3f, offset 0x185 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x40, offset 0x193 + {value: 0x0007, lo: 0x06}, + {value: 0x2183, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bbc, lo: 0x9a, hi: 0x9b}, + {value: 0x3bca, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x19a + {value: 0x000e, lo: 0x05}, + {value: 0x3bd1, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd8, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x1a0 + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be6, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bed, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf4, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bfb, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3c02, lo: 0xa6, hi: 0xa6}, + {value: 0x26a2, lo: 0xac, hi: 0xad}, + {value: 0x26a9, lo: 0xaf, hi: 0xaf}, + {value: 0x281f, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x1af + {value: 0x0007, lo: 0x03}, + {value: 0x3c6b, lo: 0xa0, hi: 0xa1}, + {value: 0x3c95, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbf, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x1b3 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x1b5 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x46, offset 0x1b9 + {value: 0x0000, lo: 0x01}, + {value: 0x299e, lo: 0x8c, hi: 0x8c}, + // Block 0x47, offset 0x1bb + {value: 0x0266, lo: 0x02}, + {value: 0x1b8f, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x48, offset 0x1be + {value: 0x0000, lo: 0x01}, + {value: 0x44e0, lo: 0x9c, hi: 0x9c}, + // Block 0x49, offset 0x1c0 + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4a, offset 0x1c3 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4b, offset 0x1c5 + {value: 0x0000, lo: 0x02}, 
+ {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4c, offset 0x1c8 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4d, offset 0x1ca + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4e, offset 0x1cc + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x4f, offset 0x1ce + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x50, offset 0x1da + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x51, offset 0x1e4 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3e, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a44, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a50, lo: 0xba, hi: 0xbf}, + // Block 0x52, offset 0x1ee + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x53, offset 0x1f1 + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x54, offset 0x1f5 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x55, offset 0x1f7 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x56, offset 0x1f9 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x57, offset 0x1fb + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x58, offset 0x1fd + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x59, offset 0x200 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5a, offset 0x202 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5b, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5c, offset 0x206 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5d, offset 0x208 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x20e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5f, offset 0x211 + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x60, offset 0x215 + {value: 0x0000, lo: 0x01}, + {value: 
0x8104, lo: 0xad, hi: 0xad}, + // Block 0x61, offset 0x217 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x62, offset 0x21e + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x63, offset 0x224 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x64, offset 0x22a + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x65, offset 0x232 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x66, offset 0x238 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x67, offset 0x23e + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x68, offset 0x244 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x69, offset 0x248 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6a, offset 0x24a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6b, offset 0x24c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6c, offset 0x24e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6d, offset 0x250 + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x256 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x6f, offset 0x259 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x70, offset 0x25b + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x71, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x264 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x423b, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + 
{value: 0x4245, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424f, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x26c + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d81, lo: 0xae, hi: 0xae}, + {value: 0x2d8b, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x273 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x276 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x279 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x27b + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x78, offset 0x27e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d95, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9f, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x79, offset 0x286 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7a, offset 0x28a + {value: 0x6b57, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db3, lo: 0xbb, hi: 0xbb}, + {value: 0x2da9, lo: 0xbc, hi: 0xbd}, + {value: 0x2dbd, lo: 0xbe, hi: 0xbe}, + // Block 0x7b, offset 0x291 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7c, offset 0x294 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc7, lo: 0xba, hi: 0xba}, + {value: 0x2dd1, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x29a + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7e, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x29e + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x80, offset 0x2a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x81, offset 0x2a3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x82, offset 0x2a6 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + // Block 0x83, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x84, offset 0x2aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x85, offset 0x2ac + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x86, offset 0x2ae + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x87, offset 0x2b1 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x88, offset 0x2b3 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x89, offset 0x2b5 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 
0x8a, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8b, offset 0x2b9 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cf, lo: 0x9e, hi: 0x9e}, + {value: 0x45d9, lo: 0x9f, hi: 0x9f}, + {value: 0x460d, lo: 0xa0, hi: 0xa0}, + {value: 0x461b, lo: 0xa1, hi: 0xa1}, + {value: 0x4629, lo: 0xa2, hi: 0xa2}, + {value: 0x4637, lo: 0xa3, hi: 0xa3}, + {value: 0x4645, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8c, offset 0x2c6 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e3, lo: 0xbb, hi: 0xbb}, + {value: 0x45ed, lo: 0xbc, hi: 0xbc}, + {value: 0x4653, lo: 0xbd, hi: 0xbd}, + {value: 0x466f, lo: 0xbe, hi: 0xbe}, + {value: 0x4661, lo: 0xbf, hi: 0xbf}, + // Block 0x8d, offset 0x2d0 + {value: 0x0000, lo: 0x01}, + {value: 0x467d, lo: 0x80, hi: 0x80}, + // Block 0x8e, offset 0x2d2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8f, offset 0x2d4 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x90, offset 0x2d8 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x91, offset 0x2dd + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x92, offset 0x2e9 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x93, offset 0x2ee + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x94, offset 0x2f7 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x95, offset 0x2fd + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x96, offset 0x302 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x97, offset 0x306 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // 
Block 0x98, offset 0x30b + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x30f + {value: 0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x9a, offset 0x31f + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x9b, offset 0x32d + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x33b + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x9d, offset 0x349 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x9e, offset 0x34f + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xac, hi: 0xaf}, + // Block 0x9f, offset 0x351 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0xa0, offset 0x353 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0xa1, offset 0x356 + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1984, lo: 0x8b, hi: 0x8b}, + {value: 0x199f, lo: 0x8c, hi: 0x8c}, + {value: 0x19a5, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc3, lo: 0x8e, hi: 0x8e}, + {value: 0x19b1, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + {value: 0x1981, lo: 0xac, hi: 0xac}, + // 
Block 0xa2, offset 0x361 + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0xa3, offset 0x363 + {value: 0x0028, lo: 0x09}, + {value: 0x2865, lo: 0x80, hi: 0x80}, + {value: 0x2829, lo: 0x81, hi: 0x81}, + {value: 0x2833, lo: 0x82, hi: 0x82}, + {value: 0x2847, lo: 0x83, hi: 0x84}, + {value: 0x2851, lo: 0x85, hi: 0x86}, + {value: 0x283d, lo: 0x87, hi: 0x87}, + {value: 0x285b, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + 
"\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 55KB (55977 bytes) diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
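The packed entries that end just above (before the vendored AUTHORS file) look like the canonical recomposition table from golang.org/x/text/unicode/norm: each record maps a base rune plus a combining mark to one precomposed rune, e.g. 0x304B + 0x3099 → 0x304C. As a rough illustration of what this vendored data drives — not part of the diff itself, and the sample string is invented — a minimal Go sketch using the public norm API:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// か (U+304B) followed by the combining dakuten (U+3099); the packed
	// recomposition table maps this pair to the precomposed が (U+304C).
	decomposed := "\u304B\u3099"

	composed := norm.NFC.String(decomposed) // compose: "\u304C"
	roundTrip := norm.NFD.String(composed)  // decompose again: "\u304B\u3099"

	fmt.Printf("NFC: %U\n", []rune(composed))
	fmt.Printf("NFD: %U\n", []rune(roundTrip))
}
```

The provider code never calls this package directly; it is pulled in transitively by the SDK v2 upgrade, which is why the generated tables show up in vendor/.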
diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vendor/golang.org/x/tools/cmd/goimports/doc.go new file mode 100644 index 00000000..7033e4d4 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/doc.go @@ -0,0 +1,43 @@ +/* + +Command goimports updates your Go import lines, +adding missing ones and removing unreferenced ones. + + $ go get golang.org/x/tools/cmd/goimports + +In addition to fixing imports, goimports also formats +your code in the same style as gofmt so it can be used +as a replacement for your editor's gofmt-on-save hook. + +For emacs, make sure you have the latest go-mode.el: + https://github.com/dominikh/go-mode.el +Then in your .emacs file: + (setq gofmt-command "goimports") + (add-hook 'before-save-hook 'gofmt-before-save) + +For vim, set "gofmt_command" to "goimports": + https://golang.org/change/39c724dd7f252 + https://golang.org/wiki/IDEsAndTextEditorPlugins + etc + +For GoSublime, follow the steps described here: + http://michaelwhatcott.com/gosublime-goimports/ + +For other editors, you probably know what to do. + +To exclude directories in your $GOPATH from being scanned for Go +files, goimports respects a configuration file at +$GOPATH/src/.goimportsignore which may contain blank lines, comment +lines (beginning with '#'), or lines naming a directory relative to +the configuration file to ignore when scanning. No globbing or regex +patterns are allowed. Use the "-v" verbose flag to verify it's +working and see what goimports is doing. + +File bugs or feature requests at: + + https://golang.org/issues/new?title=x/tools/cmd/goimports:+ + +Happy hacking! + +*/ +package main // import "golang.org/x/tools/cmd/goimports" diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go new file mode 100644 index 00000000..27708972 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -0,0 +1,380 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/scanner" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" +) + +var ( + // main operation modes + list = flag.Bool("l", false, "list files whose formatting differs from goimport's") + write = flag.Bool("w", false, "write result to (source) file instead of stdout") + doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") + srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. 
When operating on a single file, dir may instead be the complete file name.") + + verbose bool // verbose logging + + cpuProfile = flag.String("cpuprofile", "", "CPU profile output") + memProfile = flag.String("memprofile", "", "memory profile output") + memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate") + + options = &imports.Options{ + TabWidth: 8, + TabIndent: true, + Comments: true, + Fragment: true, + Env: &imports.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + } + exitCode = 0 +) + +func init() { + flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") + flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") + flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.") +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(2) +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +// argumentType is which mode goimports was invoked as. +type argumentType int + +const ( + // fromStdin means the user is piping their source into goimports. + fromStdin argumentType = iota + + // singleArg is the common case from editors, when goimports is run on + // a single file. + singleArg + + // multipleArg is when the user ran "goimports file1.go file2.go" + // or ran goimports on a directory tree. + multipleArg +) + +func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error { + opt := options + if argType == fromStdin { + nopt := *options + nopt.Fragment = true + opt = &nopt + } + + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + target := filename + if *srcdir != "" { + // Determine whether the provided -srcdirc is a directory or file + // and then use it to override the target. + // + // See https://github.com/dominikh/go-mode.el/issues/146 + if isFile(*srcdir) { + if argType == multipleArg { + return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories") + } + target = *srcdir + } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) { + // For a file which doesn't exist on disk yet, but might shortly. + // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk. + // The goimports on-save hook writes the buffer to a temp file + // first and runs goimports before the actual save to newfile.go. + // The editor's buffer is named "newfile.go" so that is passed to goimports as: + // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go + // and then the editor reloads the result from the tmp file and writes + // it to newfile.go. + target = *srcdir + } else { + // Pretend that file is from *srcdir in order to decide + // visible imports correctly. 
+ target = filepath.Join(*srcdir, filepath.Base(filename)) + } + } + + res, err := imports.Process(target, src, opt) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(out, filename) + } + if *write { + if argType == fromStdin { + // filename is "" + return errors.New("can't use -w on stdin") + } + // On Windows, we need to re-set the permissions from the file. See golang/go#38225. + var perms os.FileMode + if fi, err := os.Stat(filename); err == nil { + perms = fi.Mode() & os.ModePerm + } + err = ioutil.WriteFile(filename, res, perms) + if err != nil { + return err + } + } + if *doDiff { + if argType == fromStdin { + filename = "stdin.go" // because .orig looks silly + } + data, err := diff(src, res, filename) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + out.Write(data) + } + } + + if !*list && !*write && !*doDiff { + _, err = out.Write(res) + } + + return err +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, nil, os.Stdout, multipleArg) + } + if err != nil { + report(err) + } + return nil +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU()) + + // call gofmtMain in a separate function + // so that it can use defer and have them + // run before the exit. + gofmtMain() + os.Exit(exitCode) +} + +// parseFlags parses command line flags and returns the paths to process. +// It's a var so that custom implementations can replace it in other files. +var parseFlags = func() []string { + flag.BoolVar(&verbose, "v", false, "verbose logging") + + flag.Parse() + return flag.Args() +} + +func bufferedFileWriter(dest string) (w io.Writer, close func()) { + f, err := os.Create(dest) + if err != nil { + log.Fatal(err) + } + bw := bufio.NewWriter(f) + return bw, func() { + if err := bw.Flush(); err != nil { + log.Fatalf("error flushing %v: %v", dest, err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } + } +} + +func gofmtMain() { + flag.Usage = usage + paths := parseFlags() + + if *cpuProfile != "" { + bw, flush := bufferedFileWriter(*cpuProfile) + pprof.StartCPUProfile(bw) + defer flush() + defer pprof.StopCPUProfile() + } + // doTrace is a conditionally compiled wrapper around runtime/trace. It is + // used to allow goimports to compile under gccgo, which does not support + // runtime/trace. See https://golang.org/issue/15544. 
+ defer doTrace()() + if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + bw, flush := bufferedFileWriter(*memProfile) + defer func() { + runtime.GC() // materialize all statistics + if err := pprof.WriteHeapProfile(bw); err != nil { + log.Fatal(err) + } + flush() + }() + } + + if verbose { + log.SetFlags(log.LstdFlags | log.Lmicroseconds) + options.Env.Logf = log.Printf + } + if options.TabWidth < 0 { + fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth) + exitCode = 2 + return + } + + if len(paths) == 0 { + if err := processFile("", os.Stdin, os.Stdout, fromStdin); err != nil { + report(err) + } + return + } + + argType := singleArg + if len(paths) > 1 { + argType = multipleArg + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, nil, os.Stdout, argType); err != nil { + report(err) + } + } + } +} + +func writeTempFile(dir, prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} + +func diff(b1, b2 []byte, filename string) (data []byte, err error) { + f1, err := writeTempFile("", "gofmt", b1) + if err != nil { + return + } + defer os.Remove(f1) + + f2, err := writeTempFile("", "gofmt", b2) + if err != nil { + return + } + defer os.Remove(f2) + + cmd := "diff" + if runtime.GOOS == "plan9" { + cmd = "/bin/ape/diff" + } + + data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + return replaceTempFilename(data, filename) + } + return +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. +// +// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +// isFile reports whether name is a file. +func isFile(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.Mode().IsRegular() +} + +// isDir reports whether name is a directory. +func isDir(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.IsDir() +} diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go new file mode 100644 index 00000000..21d867ea --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go @@ -0,0 +1,26 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +package main + +import ( + "flag" + "runtime/trace" +) + +var traceProfile = flag.String("trace", "", "trace profile output") + +func doTrace() func() { + if *traceProfile != "" { + bw, flush := bufferedFileWriter(*traceProfile) + trace.Start(bw) + return func() { + flush() + trace.Stop() + } + } + return func() {} +} diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go new file mode 100644 index 00000000..f5531ceb --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gc + +package main + +func doTrace() func() { + return func() {} +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000..6b7052b8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). 
+ var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return 
"return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000..2087ceec --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,482 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. 
+ seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. 
+func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000..cf72ea99 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,477 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + 
a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000..76306298 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 00000000..f8363d8f --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. 
+func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + b, err := gcimporter.IExportData(fset, pkg) + if err != nil { + return err + } + _, err = out.Write(b) + return err +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 00000000..efe221e7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. 
+// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 00000000..a807d0aa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. 
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
+// 4: type name objects support type aliases, uses aliasTag
+// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 4
+
+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+type exporter struct {
+ fset *token.FileSet
+ out bytes.Buffer
+
+ // object -> index maps, indexed in order of serialization
+ strIndex map[string]int
+ pkgIndex map[*types.Package]int
+ typIndex map[types.Type]int
+
+ // position encoding
+ posInfoFormat bool
+ prevFile string
+ prevLine int
+
+ // debugging support
+ written int // bytes written
+ indent int // for trace
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+
+ p := exporter{
+ fset: fset,
+ strIndex: map[string]int{"": 0}, // empty string is mapped to 0
+ pkgIndex: make(map[*types.Package]int),
+ typIndex: make(map[types.Type]int),
+ posInfoFormat: true, // TODO(gri) might become a flag, eventually
+ }
+
+ // write version info
+ // The version string must start with "version %d" where %d is the version
+ // number. Additional debugging information may follow after a blank; that
+ // text is ignored by the importer. 
+ p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". 
+ n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. + + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). 
+ var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. + p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" 
to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". 
") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 00000000..e9f73d14 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1039 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). 
Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) + sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. 
+func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. + if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. 
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. +func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? 
+ params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. + n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // 
anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. + var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := 
[]byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. 
+func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. +const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 00000000..f33dc561 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. 
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 00000000..8dcd8bbb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. 
+// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
+// +func (p *parser) parsePackageID() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageID() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . +// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . 
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return newInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageID(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
+// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. + var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 00000000..4be32a2e --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,739 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// IExportData returns the binary export data for pkg. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. 
+func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := iexporter{ + out: bytes.NewBuffer(nil), + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + localpkg: pkg, + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + w.flush() + + // Assemble header. + var hdr intWriter + hdr.WriteByte('i') + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(p.out, &hdr) + io.Copy(p.out, &p.strings) + io.Copy(p.out, &p.data0) + + return p.out.Bytes(), nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + pkgObjs[w.p.localpkg] = nil + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + localpkg *types.Package + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. 
+func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. + w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. 
+ w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), 
typ) + case constant.String: + w.string(constant.StringVal(v)) + case constant.Unknown: + // package contains type errors + default: + panic(internalErrorf("unexpected value %v (%T)", v, v)) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + assert(x.Kind() == constant.Float) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. 
The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 00000000..a31a8802 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,630 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
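+//
+// For orientation, the stream consumed by IImportData below is laid out
+// (all integers varint encoded) roughly as follows, mirroring what
+// IExportData in iexport.go writes:
+//
+//	format version
+//	strings section length
+//	declaration data length
+//	strings section
+//	declaration data
+//	main index (package paths and names, plus per-object declaration offsets)
+//
+// The leading 'i' format byte emitted by IExportData is presumably consumed
+// by the caller before the data reaches IImportData.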
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + const currentVersion = 1 + version := int64(-1) + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + version = int64(r.uint64()) + switch version { + case currentVersion, 0: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + version: int(version), + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + p.ipkg = pkgList[0] + names := make([]string, 0, len(p.pkgIndex[p.ipkg])) + for name := range p.pkgIndex[p.ipkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(p.ipkg, name) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced 
packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + p.ipkg.SetImports(list) + + // package was imported completely and without errors + p.ipkg.MarkComplete() + + consumed, _ := r.Seek(0, io.SeekCurrent) + return int(consumed), p.ipkg, nil +} + +type iimporter struct { + ipath string + ipkg *types.Package + version int + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + if path == p.ipath { + return p.ipkg + } + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.version >= 1 { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && 
r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) 
+} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 00000000..463f2522 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 00000000..ab28b95c --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 00000000..e37b4949 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. +package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that ocurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. + // The length needs to be large enough to cope with the majority of events + // but no so large as to cause undue stack pressure. + // A log message with two values will use 3 labels (one for each value and + // one for the message itself). 
+ + static [3]label.Label // inline storage for the first few labels + dynamic []label.Label // dynamically sized storage for remaining labels +} + +// eventLabelMap implements label.Map for a the labels of an Event. +type eventLabelMap struct { + event Event +} + +func (ev Event) At() time.Time { return ev.at } + +func (ev Event) Format(f fmt.State, r rune) { + if !ev.at.IsZero() { + fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) + } + for index := 0; ev.Valid(index); index++ { + if l := ev.Label(index); l.Valid() { + fmt.Fprintf(f, "\n\t%v", l) + } + } +} + +func (ev Event) Valid(index int) bool { + return index >= 0 && index < len(ev.static)+len(ev.dynamic) +} + +func (ev Event) Label(index int) label.Label { + if index < len(ev.static) { + return ev.static[index] + } + return ev.dynamic[index-len(ev.static)] +} + +func (ev Event) Find(key label.Key) label.Label { + for _, l := range ev.static { + if l.Key() == key { + return l + } + } + for _, l := range ev.dynamic { + if l.Key() == key { + return l + } + } + return label.Label{} +} + +func MakeEvent(static [3]label.Label, labels []label.Label) Event { + return Event{ + static: static, + dynamic: labels, + } +} + +// CloneEvent event returns a copy of the event with the time adjusted to at. +func CloneEvent(ev Event, at time.Time) Event { + ev.at = at + return ev +} diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go new file mode 100644 index 00000000..05f3a9a5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -0,0 +1,70 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "sync/atomic" + "time" + "unsafe" + + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, Event, label.Map) context.Context + +var ( + exporter unsafe.Pointer +) + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + p := unsafe.Pointer(&e) + if e == nil { + // &e is always valid, and so p is always valid, but for the early abort + // of ProcessEvent to be efficient it needs to make the nil check on the + // pointer without having to dereference it, so we make the nil function + // also a nil pointer + p = nil + } + atomic.StorePointer(&exporter, p) +} + +// deliver is called to deliver an event to the supplied exporter. +// it will fill in the time. +func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { + // add the current time to the event + ev.at = time.Now() + // hand the event off to the current exporter + return exporter(ctx, ev, ev) +} + +// Export is called to deliver an event to the global exporter if set. +func Export(ctx context.Context, ev Event) context.Context { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx + } + return deliver(ctx, *exporterPtr, ev) +} + +// ExportPair is called to deliver a start event to the supplied exporter. +// It also returns a function that will deliver the end event to the same +// exporter. 
+// It will fill in the time. +func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx, func() {} + } + ctx = deliver(ctx, *exporterPtr, begin) + return ctx, func() { deliver(ctx, *exporterPtr, end) } +} diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go new file mode 100644 index 00000000..06c1d461 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/fast.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Log1 takes a message and one label delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log1(ctx context.Context, message string, t1 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + }, nil)) +} + +// Log2 takes a message and two labels and delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + t2, + }, nil)) +} + +// Metric1 sends a label event to the exporter with the supplied labels. +func Metric1(ctx context.Context, t1 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + }, nil)) +} + +// Metric2 sends a label event to the exporter with the supplied labels. +func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + t2, + }, nil)) +} + +// Start1 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// Start2 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 00000000..5dc6e6ba --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. 
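+//
+// A minimal usage sketch, assuming an exporter has already been registered
+// via SetExporter and ctx is a context.Context:
+//
+//	ctx, done := event.Start(ctx, "doWork")
+//	defer done()
+//	event.Log(ctx, "work started")
+//
+// Events are delivered synchronously to the registered exporter; if no
+// exporter is set, they are dropped.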
+package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 00000000..4d55e577 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. 
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 00000000..a02206e3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. +func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. 
+func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. 
+func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. 
+func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. +func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float32) Of(v float32) label.Label { + return label.Of64(k, uint64(math.Float32bits(v))) +} + +// Get can be used to get a label for the key from a label.Map. 
+func (k *Float32) Get(lm label.Map) float32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float32) From(t label.Label) float32 { + return math.Float32frombits(uint32(t.Unpack64())) +} + +// Float64 represents a key +type Float64 struct { + name string + description string +} + +// NewFloat64 creates a new Key for int64 values. +func NewFloat64(name, description string) *Float64 { + return &Float64{name: name, description: description} +} + +func (k *Float64) Name() string { return k.name } +func (k *Float64) Description() string { return k.description } + +func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float64) Of(v float64) label.Label { + return label.Of64(k, math.Float64bits(v)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float64) Get(lm label.Map) float64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float64) From(t label.Label) float64 { + return math.Float64frombits(t.Unpack64()) +} + +// String represents a key +type String struct { + name string + description string +} + +// NewString creates a new Key for int64 values. +func NewString(name, description string) *String { + return &String{name: name, description: description} +} + +func (k *String) Name() string { return k.name } +func (k *String) Description() string { return k.description } + +func (k *String) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendQuote(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *String) Of(v string) label.Label { return label.OfString(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *String) Get(lm label.Map) string { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return "" +} + +// From can be used to get a value from a Label. +func (k *String) From(t label.Label) string { return t.UnpackString() } + +// Boolean represents a key +type Boolean struct { + name string + description string +} + +// NewBoolean creates a new Key for bool values. +func NewBoolean(name, description string) *Boolean { + return &Boolean{name: name, description: description} +} + +func (k *Boolean) Name() string { return k.name } +func (k *Boolean) Description() string { return k.description } + +func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendBool(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Boolean) Of(v bool) label.Label { + if v { + return label.Of64(k, 1) + } + return label.Of64(k, 0) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Boolean) Get(lm label.Map) bool { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return false +} + +// From can be used to get a value from a Label. +func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 } + +// Error represents a key +type Error struct { + name string + description string +} + +// NewError creates a new Key for int64 values. 
+func NewError(name, description string) *Error { + return &Error{name: name, description: description} +} + +func (k *Error) Name() string { return k.name } +func (k *Error) Description() string { return k.description } + +func (k *Error) Format(w io.Writer, buf []byte, l label.Label) { + io.WriteString(w, k.From(l).Error()) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Error) Get(lm label.Map) error { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Error) From(t label.Label) error { + err, _ := t.UnpackValue().(error) + return err +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go new file mode 100644 index 00000000..7e958665 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +var ( + // Msg is a key used to add message strings to label lists. + Msg = NewString("message", "a readable message") + // Label is a key used to indicate an event adds labels to the context. + Label = NewTag("label", "a label context marker") + // Start is used for things like traces that have a name. + Start = NewString("start", "span start") + // Metric is a key used to indicate an event records metrics. + End = NewTag("end", "a span end marker") + // Metric is a key used to indicate an event records metrics. + Detach = NewTag("detach", "a span detach marker") + // Err is a key used to add error values to label lists. + Err = NewError("error", "an error that occurred") + // Metric is a key used to indicate an event records metrics. + Metric = NewTag("metric", "a metric event marker") +) diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go new file mode 100644 index 00000000..b55c12eb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -0,0 +1,213 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package label + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +// Key is used as the identity of a Label. +// Keys are intended to be compared by pointer only, the name should be unique +// for communicating with external systems, but it is not required or enforced. +type Key interface { + // Name returns the key name. + Name() string + // Description returns a string that can be used to describe the value. + Description() string + + // Format is used in formatting to append the value of the label to the + // supplied buffer. + // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped interface{} +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. 
+ Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() interface{} { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: unsafe.Pointer(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(unsafe.Pointer)) + hdr.Len = int(t.packed) + return *(*string)(unsafe.Pointer(hdr)) +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. 
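
The vendored event/keys and event/label packages above implement typed label keys: a key packs a value into a `Label` with `Of` and recovers it with `From`, or with `Get` against any `label.Map` (the `NewMap` constructor appears just below). The following is a minimal usage sketch, not part of the vendored code; it assumes it is built inside the golang.org/x/tools module, since these packages are internal, and the key names are invented for illustration.

```go
// A minimal sketch, assuming it is built inside golang.org/x/tools
// (these packages are internal); the key names are illustrative only.
package main

import (
	"fmt"

	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

// Keys are declared once and compared by pointer identity.
var (
	msgKey   = keys.NewString("message", "a readable message")
	countKey = keys.NewFloat64("count", "an illustrative numeric value")
)

func main() {
	// Of packs a value into a Label; From unpacks it with the matching type.
	m := msgKey.Of("hello")
	c := countKey.Of(42)
	fmt.Println(m.Valid(), msgKey.From(m))   // true hello
	fmt.Println(c.Valid(), countKey.From(c)) // true 42

	// Get looks a label up in a label.Map and returns the zero value on a miss.
	lm := label.NewMap(m, c)
	fmt.Println(msgKey.Get(lm), countKey.Get(lm)) // hello 42
}
```
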
+func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 00000000..9887f7e7 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of filepath.Walk for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. +var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. 
The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. +func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == ErrTraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. 
+ return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 00000000..ccffec5a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 00000000..ab7fbc0a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 00000000..a3b26a7b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 00000000..e880d358 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
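
The fastwalk package above trades filepath.Walk's per-file os.Lstat for raw readdir information and calls the callback from several goroutines, passing only the entry's type bits. The sketch below, which is not part of the vendored code, shows the walkFn contract; it assumes it is built inside golang.org/x/tools (the package is internal) and simply counts .go files while skipping vendor directories.

```go
// A minimal sketch, assuming it is built inside golang.org/x/tools.
// walkFn receives only the file type, may run concurrently, and can return
// filepath.SkipDir, ErrSkipFiles, or ErrTraverseLink to steer the walk.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync/atomic"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	var goFiles int64
	err := fastwalk.Walk(".", func(path string, typ os.FileMode) error {
		if typ.IsDir() {
			if filepath.Base(path) == "vendor" {
				return filepath.SkipDir // do not descend into vendor trees
			}
			return nil
		}
		if typ == os.ModeSymlink {
			// Returning fastwalk.ErrTraverseLink here would follow the link
			// if it points at a directory; this sketch ignores symlinks.
			return nil
		}
		if strings.HasSuffix(path, ".go") {
			atomic.AddInt64(&goFiles, 1) // walkFn runs concurrently
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Go files:", goFiles)
}
```
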
+ +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 00000000..b0d6327a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == ErrSkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 00000000..5901a8f6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,128 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." 
{ + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == ErrSkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/37269 + dirent := &syscall.Dirent{} + copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 00000000..f516e176 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,230 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/event" +) + +// An Runner will run go command invocations and serialize +// them if it sees a concurrency error. +type Runner struct { + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. 
+ serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) +} + +// 1.13: go: updates to go.mod needed, but contents have changed +// 1.14: go: updating go.mod: existing contents have changed since last read +var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) + +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. +func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) + return stdout, friendly +} + +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. 
+type Invocation struct { + Verb string + Args []string + BuildFlags []string + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + switch i.Verb { + case "mod": + // mod needs the sub-verb before build flags. + goArgs = append(goArgs, i.Args[0]) + goArgs = append(goArgs, i.BuildFlags...) + goArgs = append(goArgs, i.Args[1:]...) + case "env": + // env doesn't take build flags. + goArgs = append(goArgs, i.Args...) + default: + goArgs = append(goArgs, i.BuildFlags...) + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(os.Environ(), i.Env...) + if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + + return runCmdContext(ctx, cmd) +} + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + // Cancelled. Interrupt and see if it ends voluntarily. + cmd.Process.Signal(os.Interrupt) + select { + case err := <-resChan: + return err + case <-time.After(time.Second): + } + // Didn't shut down in response to interrupt. Kill it hard. + cmd.Process.Kill() + return <-resChan +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 00000000..1cd8d847 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
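
The Runner above caps concurrent go invocations at maxInFlight and retries fully serialized when it detects the go.mod concurrency error. A minimal sketch of driving it follows; it is not part of the vendored code, it only builds inside golang.org/x/tools because the package is internal, and the verb and arguments are just an example.

```go
// A minimal sketch, assuming it is built inside golang.org/x/tools.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/tools/internal/gocommand"
)

func main() {
	// The zero value is usable; initialization happens lazily on first Run.
	runner := &gocommand.Runner{}

	inv := gocommand.Invocation{
		Verb:       "env",
		Args:       []string{"GOMOD"}, // example invocation: `go env GOMOD`
		WorkingDir: ".",
	}

	// Run returns stdout plus a "friendly" error that already embeds stderr.
	stdout, err := runner.Run(context.Background(), inv)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(stdout.String())
}
```
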
+ +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Replace *ModuleJSON // replaced by this module + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file for this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return nil, false, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + if modFlag != "" { + // Don't override an explicit '-mod=' argument. + return mainMod, modFlag == "vendor", nil + } + if mainMod == nil || !go114 { + return mainMod, false, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return mainMod, true, nil + } + } + return mainMod, false, nil +} + +// getMainModuleAnd114 gets the main module's information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 00000000..390cb9db --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,275 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
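
VendorEnabled above mirrors the go command's own -mod=vendor defaulting: an explicit -mod flag in GOFLAGS wins; otherwise a vendor directory next to the main module plus Go 1.14 or newer turns vendoring on. The short sketch below is illustrative only (internal package, builds only inside golang.org/x/tools); Verb and Args on the passed Invocation are overwritten, so only environment-style fields matter, and "." stands in for a module directory.

```go
// A minimal sketch, assuming it is built inside golang.org/x/tools.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/tools/internal/gocommand"
)

func main() {
	runner := &gocommand.Runner{}
	inv := gocommand.Invocation{WorkingDir: "."} // hypothetical module directory

	mainMod, vendorOn, err := gocommand.VendorEnabled(context.Background(), inv, runner)
	if err != nil {
		log.Fatal(err)
	}
	if mainMod != nil {
		fmt.Println("main module:", mainMod.Path, "in", mainMod.Dir)
	}
	fmt.Println("vendoring enabled:", vendorOn)
}
```
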
+ +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. + ModulesEnabled bool +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. +func SrcDirsRoots(ctx *build.Context) []Root { + var roots []Root + roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) + for _, p := range filepath.SplitList(ctx.GOPATH) { + roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) + } + return roots +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called (concurrently) +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// add will be called concurrently. +// skip will be called concurrently. +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +// walkDir creates a walker and starts fastwalk with this walker. +func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Logf != nil { + opts.Logf("skipping nonexistent directory: %v", root.Path) + } + return + } + start := time.Now() + if opts.Logf != nil { + opts.Logf("gopathwalk: scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Logf != nil { + opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. + opts Options // Options passed to Walk by the user. 
+ + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) + } + } else if w.opts.Logf != nil { + w.opts.Logf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. +func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Logf != nil { + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +// shouldSkipDir reports whether the file should be skipped or not. +func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + if w.skip != nil { + // Check with the user specified callback. + return w.skip(w.root, dir) + } + return false +} + +// walk walks through the given path. +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return fastwalk.ErrSkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.ErrSkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi, path) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.ErrTraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. 
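
gopathwalk above layers Go-specific knowledge (GOROOT/GOPATH roots, .goimportsignore files, built-in skipping of testdata and node_modules) on top of fastwalk. The sketch below collects candidate package directories; it is not part of the vendored code, it only builds inside golang.org/x/tools, and the extra skip rule on directories named "internal" is purely illustrative. Both add and skip are invoked concurrently, so shared state needs a mutex.

```go
// A minimal sketch, assuming it is built inside golang.org/x/tools.
package main

import (
	"fmt"
	"go/build"
	"path/filepath"
	"sync"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	// $GOROOT/src plus src/ under each $GOPATH entry.
	roots := gopathwalk.SrcDirsRoots(&build.Default)

	var (
		mu   sync.Mutex
		dirs []string
	)
	add := func(root gopathwalk.Root, dir string) {
		mu.Lock()
		defer mu.Unlock()
		dirs = append(dirs, dir)
	}
	// Illustrative extra filter: returning true prevents the walk from
	// descending into that directory.
	skip := func(root gopathwalk.Root, dir string) bool {
		return filepath.Base(dir) == "internal"
	}

	gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{ModulesEnabled: false})
	fmt.Println("candidate package directories:", len(dirs))
}
```
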
+func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts, dir) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go new file mode 100644 index 00000000..ecd13e87 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -0,0 +1,1647 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" +) + +// importToGroup is a list of functions which map from an import path to +// a group number. +var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { + return + } + for _, p := range strings.Split(localPrefix, ",") { + if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { + return 3, true + } + } + return + }, + func(_, importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(_, importPath string) (num int, ok bool) { + firstComponent := strings.Split(importPath, "/")[0] + if strings.Contains(firstComponent, ".") { + return 1, true + } + return + }, +} + +func importGroup(localPrefix, importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(localPrefix, importPath); ok { + return n + } + } + return 0 +} + +type ImportFixType int + +const ( + AddImport ImportFixType = iota + DeleteImport + SetImportName +) + +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType + Relevance int // see pkg +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A packageInfo represents what's known about a package. +type packageInfo struct { + name string // real package name, if known. + exports map[string]bool // known exports. +} + +// parseOtherFiles parses all the Go files in srcDir except filename, including +// test files if filename looks like a test. 
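
The importToGroup chain above decides which block an import lands in when goimports regroups them: paths matching the caller's local prefix sort last (group 3), appengine paths next (2), anything whose first path element contains a dot, i.e. looks like a hosted module, next (1), and everything else, effectively the standard library, first (0). Because importGroup is unexported, the sketch below is written as a hypothetical test that would have to live in package imports itself; the paths and local prefix are made up, not taken from the vendored code.

```go
// A hypothetical test sketch inside package imports; importGroup is unexported,
// so this only illustrates the grouping rules shown above.
package imports

import "testing"

func TestImportGroupSketch(t *testing.T) {
	cases := []struct {
		localPrefix, path string
		want              int
	}{
		{"", "fmt", 0},                                            // no dot in first element: stdlib-style, group 0
		{"", "golang.org/x/mod/semver", 1},                        // hosted module path, group 1
		{"", "appengine/datastore", 2},                            // appengine prefix, group 2
		{"example.com/mymod", "example.com/mymod/internal/x", 3},  // local prefix wins, group 3
	}
	for _, c := range cases {
		if got := importGroup(c.localPrefix, c.path); got != c.want {
			t.Errorf("importGroup(%q, %q) = %d, want %d", c.localPrefix, c.path, got, c.want)
		}
	}
}
```
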
+func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { + // This could use go/packages but it doesn't buy much, and it fails + // with https://golang.org/issue/26296 in LoadFiles mode in some cases. + considerTests := strings.HasSuffix(filename, "_test.go") + + fileBase := filepath.Base(filename) + packageFileInfos, err := ioutil.ReadDir(srcDir) + if err != nil { + return nil + } + + var files []*ast.File + for _, fi := range packageFileInfos { + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + if err != nil { + continue + } + + files = append(files, f) + } + + return files +} + +// addGlobals puts the names of package vars into the provided map. +func addGlobals(f *ast.File, globals map[string]bool) { + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + globals[valueSpec.Names[0].Name] = true + } + } +} + +// collectReferences builds a map of selector expressions, from +// left hand side (X) to a set of right hand sides (Sel). +func collectReferences(f *ast.File) references { + refs := references{} + + var visitor visitFn + visitor = func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // If the parser can resolve it, it's not a package ref. + break + } + if !ast.IsExported(v.Sel.Name) { + // Whatever this is, it's not exported from a package. + break + } + pkgName := xident.Name + r := refs[pkgName] + if r == nil { + r = make(map[string]bool) + refs[pkgName] = r + } + r[v.Sel.Name] = true + } + return visitor + } + ast.Walk(visitor, f) + return refs +} + +// collectImports returns all the imports in f. +// Unnamed imports (., _) and "C" are ignored. +func collectImports(f *ast.File) []*ImportInfo { + var imports []*ImportInfo + for _, imp := range f.Imports { + var name string + if imp.Name != nil { + name = imp.Name.Name + } + if imp.Path.Value == `"C"` || name == "_" || name == "." { + continue + } + path := strings.Trim(imp.Path.Value, `"`) + imports = append(imports, &ImportInfo{ + Name: name, + ImportPath: path, + }) + } + return imports +} + +// findMissingImport searches pass's candidates for an import that provides +// pkg, containing all of syms. +func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { + for _, candidate := range p.candidates { + pkgInfo, ok := p.knownPackages[candidate.ImportPath] + if !ok { + continue + } + if p.importIdentifier(candidate) != pkg { + continue + } + + allFound := true + for right := range syms { + if !pkgInfo.exports[right] { + allFound = false + break + } + } + + if allFound { + return candidate + } + } + return nil +} + +// references is set of references found in a Go file. The first map key is the +// left hand side of a selector expression, the second key is the right hand +// side, and the value should always be true. +type references map[string]map[string]bool + +// A pass contains all the inputs and state necessary to fix a file's imports. +// It can be modified in some ways during use; see comments below. +type pass struct { + // Inputs. 
These must be set before a call to load, and not modified after. + fset *token.FileSet // fset used to parse f and its siblings. + f *ast.File // the file being fixed. + srcDir string // the directory containing f. + env *ProcessEnv // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + + // Intermediate state, generated by load. + existingImports map[string]*ImportInfo + allRefs references + missingRefs references + + // Inputs to fix. These can be augmented between successive fix calls. + lastTry bool // indicates that this is the last call and fix should clean up as best it can. + candidates []*ImportInfo // candidate imports in priority order. + knownPackages map[string]*packageInfo // information about all known packages. +} + +// loadPackageNames saves the package names for everything referenced by imports. +func (p *pass) loadPackageNames(imports []*ImportInfo) error { + if p.env.Logf != nil { + p.env.Logf("loading package names for %v packages", len(imports)) + defer func() { + p.env.Logf("done loading package names for %v packages", len(imports)) + }() + } + var unknown []string + for _, imp := range imports { + if _, ok := p.knownPackages[imp.ImportPath]; ok { + continue + } + unknown = append(unknown, imp.ImportPath) + } + + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) + if err != nil { + return err + } + + for path, name := range names { + p.knownPackages[path] = &packageInfo{ + name: name, + exports: map[string]bool{}, + } + } + return nil +} + +// importIdentifier returns the identifier that imp will introduce. It will +// guess if the package name has not been loaded, e.g. because the source +// is not available. +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name + } + known := p.knownPackages[imp.ImportPath] + if known != nil && known.name != "" { + return known.name + } + return ImportPathToAssumedName(imp.ImportPath) +} + +// load reads in everything necessary to run a pass, and reports whether the +// file already has all the imports it needs. It fills in p.missingRefs with the +// file's missing symbols, if any, or removes unused imports if not. +func (p *pass) load() ([]*ImportFix, bool) { + p.knownPackages = map[string]*packageInfo{} + p.missingRefs = references{} + p.existingImports = map[string]*ImportInfo{} + + // Load basic information about the file in question. + p.allRefs = collectReferences(p.f) + + // Load stuff from other files in the same package: + // global variables so we know they don't need resolving, and imports + // that we might want to mimic. + globals := map[string]bool{} + for _, otherFile := range p.otherFiles { + // Don't load globals from files that are in the same directory + // but a different package. Using them to suggest imports is OK. + if p.f.Name.Name == otherFile.Name.Name { + addGlobals(otherFile, globals) + } + p.candidates = append(p.candidates, collectImports(otherFile)...) + } + + // Resolve all the import paths we've seen to package names, and store + // f's imports by the identifier they introduce. 
+ imports := collectImports(p.f) + if p.loadRealPackageNames { + err := p.loadPackageNames(append(imports, p.candidates...)) + if err != nil { + if p.env.Logf != nil { + p.env.Logf("loading package names: %v", err) + } + return nil, false + } + } + for _, imp := range imports { + p.existingImports[p.importIdentifier(imp)] = imp + } + + // Find missing references. + for left, rights := range p.allRefs { + if globals[left] { + continue + } + _, ok := p.existingImports[left] + if !ok { + p.missingRefs[left] = rights + continue + } + } + if len(p.missingRefs) != 0 { + return nil, false + } + + return p.fix() +} + +// fix attempts to satisfy missing imports using p.candidates. If it finds +// everything, or if p.lastTry is true, it updates fixes to add the imports it found, +// delete anything unused, and update import names, and returns true. +func (p *pass) fix() ([]*ImportFix, bool) { + // Find missing imports. + var selected []*ImportInfo + for left, rights := range p.missingRefs { + if imp := p.findMissingImport(left, rights); imp != nil { + selected = append(selected, imp) + } + } + + if !p.lastTry && len(selected) != len(p.missingRefs) { + return nil, false + } + + // Found everything, or giving up. Add the new imports and remove any unused. + var fixes []*ImportFix + for _, imp := range p.existingImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } + + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } + } + + for _, imp := range selected { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: AddImport, + }) + } + + return fixes, true +} + +// importSpecName gets the import name of imp in the import spec. +// +// When the import identifier matches the assumed import name, the import name does +// not appear in the import spec. +func (p *pass) importSpecName(imp *ImportInfo) string { + // If we did not load the real package names, or the name is already set, + // we just return the existing name. + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name + } + + ident := p.importIdentifier(imp) + if ident == ImportPathToAssumedName(imp.ImportPath) { + return "" // ident not needed since the assumed and real names are the same. + } + return ident +} + +// apply will perform the fixes on f in order. +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { + for _, fix := range fixes { + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: + // Find the matching import path and change the name. 
+ for _, spec := range f.Imports { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { + spec.Name = &ast.Ident{ + Name: fix.StmtInfo.Name, + NamePos: spec.Pos(), + } + } + } + } + } +} + +// assumeSiblingImportsValid assumes that siblings' use of packages is valid, +// adding the exports they use. +func (p *pass) assumeSiblingImportsValid() { + for _, f := range p.otherFiles { + refs := collectReferences(f) + imports := collectImports(f) + importsByName := map[string]*ImportInfo{} + for _, imp := range imports { + importsByName[p.importIdentifier(imp)] = imp + } + for left, rights := range refs { + if imp, ok := importsByName[left]; ok { + if m, ok := stdlib[imp.ImportPath]; ok { + // We have the stdlib in memory; no need to guess. + rights = copyExports(m) + } + p.addCandidate(imp, &packageInfo{ + // no name; we already know it. + exports: rights, + }) + } + } + } +} + +// addCandidate adds a candidate import to p, and merges in the information +// in pkg. +func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { + p.candidates = append(p.candidates, imp) + if existing, ok := p.knownPackages[imp.ImportPath]; ok { + if existing.name == "" { + existing.name = pkg.name + } + for export := range pkg.exports { + existing.exports[export] = true + } + } else { + p.knownPackages[imp.ImportPath] = pkg + } +} + +// fixImports adds and removes imports from f so that all its references are +// satisfied and there are no unused imports. +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. +var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { + fixes, err := getFixes(fset, f, filename, env) + if err != nil { + return err + } + apply(fset, f, fixes) + return err +} + +// getFixes gets the import fixes that need to be made to f in order to fix the imports. +// It does not modify the ast. +func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if env.Logf != nil { + env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + // First pass: looking only at f, and using the naive algorithm to + // derive package names from import paths, see if the file is already + // complete. We can't add any imports yet, because we don't know + // if missing references are actually package vars. + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} + if fixes, done := p.load(); done { + return fixes, nil + } + + otherFiles := parseOtherFiles(fset, srcDir, filename) + + // Second pass: add information from other files in the same package, + // like their package vars and imports. + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + // Now we can try adding imports from the stdlib. + p.assumeSiblingImportsValid() + addStdlibCandidates(p, p.missingRefs) + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Third pass: get real package names where we had previously used + // the naive algorithm. 
+ p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p.loadRealPackageNames = true + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + addStdlibCandidates(p, p.missingRefs) + p.assumeSiblingImportsValid() + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Go look for candidates in $GOPATH, etc. We don't necessarily load + // the real exports of sibling imports, so keep assuming their contents. + if err := addExternalCandidates(p, p.missingRefs, filename); err != nil { + return nil, err + } + + p.lastTry = true + fixes, _ := p.fix() + return fixes, nil +} + +// Highest relevance, used for the standard library. Chosen arbitrarily to +// match pre-existing gopls code. +const MaxRelevance = 7 + +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. +func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } + // Start off with the standard library. + for importPath, exports := range stdlib { + p := &pkg{ + dir: filepath.Join(env.goroot(), "src", importPath), + importPathShort: importPath, + packageName: path.Base(importPath), + relevance: MaxRelevance, + } + if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } + } + + var mu sync.Mutex + dupCheck := map[string]struct{}{} + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, + } + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) +} + +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]int, error) { + result := make(map[string]int) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } + for _, path := range paths { + result[path] = resolver.scoreImportPath(ctx, path) + } + return result, nil +} + +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. 
+ callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) +} + +func candidateImportName(pkg *pkg) string { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { + return pkg.packageName + } + return "" +} + +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. + return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// A PackageExport is a package and its exports. +type PackageExport struct { + Fix *ImportFix + Exports []string +} + +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []string) { + sort.Strings(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} + +// ProcessEnv contains environment variables and settings that affect the use of +// the go command, the go/build package, etc. +type ProcessEnv struct { + GocmdRunner *gocommand.Runner + + BuildFlags []string + + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string + + // If Logf is non-nil, debug logging is enabled through this function. 
+	Logf func(format string, args ...interface{})
+
+	resolver Resolver
+}
+
+func (e *ProcessEnv) goroot() string {
+	return e.mustGetEnv("GOROOT")
+}
+
+func (e *ProcessEnv) gopath() string {
+	return e.mustGetEnv("GOPATH")
+}
+
+func (e *ProcessEnv) mustGetEnv(k string) string {
+	v, ok := e.Env[k]
+	if !ok {
+		panic(fmt.Sprintf("%v not set in evaluated environment", k))
+	}
+	return v
+}
+
+// CopyConfig copies the env's configuration into a new env.
+func (e *ProcessEnv) CopyConfig() *ProcessEnv {
+	copy := *e
+	copy.resolver = nil
+	return &copy
+}
+
+func (e *ProcessEnv) init() error {
+	foundAllRequired := true
+	for _, k := range RequiredGoEnvVars {
+		if _, ok := e.Env[k]; !ok {
+			foundAllRequired = false
+			break
+		}
+	}
+	if foundAllRequired {
+		return nil
+	}
+
+	if e.Env == nil {
+		e.Env = map[string]string{}
+	}
+
+	goEnv := map[string]string{}
+	stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil {
+		return err
+	}
+	for k, v := range goEnv {
+		e.Env[k] = v
+	}
+	return nil
+}
+
+func (e *ProcessEnv) env() []string {
+	var env []string // the gocommand package will prepend os.Environ.
+	for k, v := range e.Env {
+		env = append(env, k+"="+v)
+	}
+	return env
+}
+
+func (e *ProcessEnv) GetResolver() (Resolver, error) {
+	if e.resolver != nil {
+		return e.resolver, nil
+	}
+	if err := e.init(); err != nil {
+		return nil, err
+	}
+	if len(e.Env["GOMOD"]) == 0 {
+		e.resolver = newGopathResolver(e)
+		return e.resolver, nil
+	}
+	e.resolver = newModuleResolver(e)
+	return e.resolver, nil
+}
+
+func (e *ProcessEnv) buildContext() *build.Context {
+	ctx := build.Default
+	ctx.GOROOT = e.goroot()
+	ctx.GOPATH = e.gopath()
+
+	// As of Go 1.14, build.Context has a Dir field
+	// (see golang.org/issue/34860).
+	// Populate it only if present.
+	rc := reflect.ValueOf(&ctx).Elem()
+	dir := rc.FieldByName("Dir")
+	if !dir.IsValid() {
+		// Working drafts of Go 1.14 named the field "WorkingDir" instead.
+		// TODO(bcmills): Remove this case after the Go 1.14 beta has been released.
+		dir = rc.FieldByName("WorkingDir")
+	}
+	if dir.IsValid() && dir.Kind() == reflect.String {
+		dir.SetString(e.WorkingDir)
+	}
+
+	return &ctx
+}
+
+func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) {
+	inv := gocommand.Invocation{
+		Verb:       verb,
+		Args:       args,
+		BuildFlags: e.BuildFlags,
+		Env:        e.env(),
+		Logf:       e.Logf,
+		WorkingDir: e.WorkingDir,
+	}
+	return e.GocmdRunner.Run(ctx, inv)
+}
+
+func addStdlibCandidates(pass *pass, refs references) {
+	add := func(pkg string) {
+		// Prevent self-imports.
+		if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.goroot(), "src", pkg) == pass.srcDir {
+			return
+		}
+		exports := copyExports(stdlib[pkg])
+		pass.addCandidate(
+			&ImportInfo{ImportPath: pkg},
+			&packageInfo{name: path.Base(pkg), exports: exports})
+	}
+	for left := range refs {
+		if left == "rand" {
+			// Make sure we try crypto/rand before math/rand.
+			add("crypto/rand")
+			add("math/rand")
+			continue
+		}
+		for importPath := range stdlib {
+			if path.Base(importPath) == left {
+				add(importPath)
+			}
+		}
+	}
+}
+
+// A Resolver does the build-system-specific parts of goimports.
+type Resolver interface {
+	// loadPackageNames loads the package names in importPaths.
+	loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
+	// scan works with callback to search for packages.
See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. + // loadExports may be called concurrently. + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) int + + ClearForNewScan() +} + +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) +} + +func addExternalCandidates(pass *pass, refs references, filename string) error { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := pass.env.GetResolver() + if err != nil { + return err + } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } + + // Search for imports matching potential package references. + type result struct { + imp *ImportInfo + pkg *packageInfo + } + results := make(chan result, len(refs)) + + ctx, cancel := context.WithCancel(context.TODO()) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + var ( + firstErr error + firstErrOnce sync.Once + ) + for pkgName, symbols := range refs { + wg.Add(1) + go func(pkgName string, symbols map[string]bool) { + defer wg.Done() + + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + + if err != nil { + firstErrOnce.Do(func() { + firstErr = err + cancel() + }) + return + } + + if found == nil { + return // No matching package. 
+ } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + + pkg := &packageInfo{ + name: pkgName, + exports: symbols, + } + results <- result{imp, pkg} + }(pkgName, symbols) + } + go func() { + wg.Wait() + close(results) + }() + + for result := range results { + pass.addCandidate(result.imp, result.pkg) + } + return firstErr +} + +// notIdentifier reports whether ch is an invalid identifier character. +func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// ImportPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. +func ImportPathToAssumedName(importPath string) string { + base := path.Base(importPath) + if strings.HasPrefix(base, "v") { + if _, err := strconv.Atoi(base[1:]); err == nil { + dir := path.Dir(importPath) + if dir != "." { + base = path.Base(dir) + } + } + } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } + return base +} + +// gopathResolver implements resolver for GOPATH workspaces. +type gopathResolver struct { + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. +} + +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *gopathResolver) ClearForNewScan() { + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} +} + +func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + for _, path := range importPaths { + names[path] = importPathToName(r.env, path, srcDir) + } + return names, nil +} + +// importPathToName finds out the actual package name, as declared in its .go files. +// If there's a problem, it returns "". +func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) { + // Fast path for standard library without going to disk. + if _, ok := stdlib[importPath]; ok { + return path.Base(importPath) // stdlib packages always match their paths. + } + + buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "" + } + pkgName, err := packageDirToName(buildPkg.Dir) + if err != nil { + return "" + } + return pkgName +} + +// packageDirToName is a faster version of build.Import if +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, +// trusting that the files in the directory are consistent. 
+func packageDirToName(dir string) (packageName string, err error) { + d, err := os.Open(dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance int // a weakly-defined score of how relevant a package is. 0 is most relevant. +} + +type pkgDistance struct { + pkg *pkg + distance int // relative distance to target +} + +// byDistanceOrImportPathShortLength sorts by relative distance breaking ties +// on the short import path length and then the import string itself. +type byDistanceOrImportPathShortLength []pkgDistance + +func (s byDistanceOrImportPathShortLength) Len() int { return len(s) } +func (s byDistanceOrImportPathShortLength) Less(i, j int) bool { + di, dj := s[i].distance, s[j].distance + if di == -1 { + return false + } + if dj == -1 { + return true + } + if di != dj { + return di < dj + } + + vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort + if len(vi) != len(vj) { + return len(vi) < len(vj) + } + return vi < vj +} +func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func distance(basepath, targetpath string) int { + p, err := filepath.Rel(basepath, targetpath) + if err != nil { + return -1 + } + if p == "." { + return 0 + } + return strings.Count(p, string(filepath.Separator)) + 1 +} + +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { + add := func(root gopathwalk.Root, dir string) { + // We assume cached directories have not changed. We can skip them and their + // children. + if _, ok := r.cache.Load(dir); ok { + return + } + + importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) + info := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: VendorlessPath(importpath), + } + r.cache.Store(dir, info) + } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + + p := &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + relevance: MaxRelevance - 1, + } + if info.rootType == gopathwalk.RootGOROOT { + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) + } + } + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { + var result []gopathwalk.Root + for _, root := range roots { + if !include(root) { + continue + } + result = append(result, root) + } + return result +} + +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { + return r.cache.CacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { + var exports []string + + // Look for non-test, buildable .go files which could provide exports. 
+ all, err := ioutil.ReadDir(dir) + if err != nil { + return "", nil, err + } + var files []os.FileInfo + for _, fi := range all { + name := fi.Name() + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { + continue + } + match, err := env.buildContext().MatchFile(dir, fi.Name()) + if err != nil || !match { + continue + } + files = append(files, fi) + } + + if len(files) == 0 { + return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) + } + + var pkgName string + fset := token.NewFileSet() + for _, fi := range files { + select { + case <-ctx.Done(): + return "", nil, ctx.Err() + default: + } + + fullFile := filepath.Join(dir, fi.Name()) + f, err := parser.ParseFile(fset, fullFile, nil, 0) + if err != nil { + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } + continue + } + if f.Name.Name == "documentation" { + // Special case from go/build.ImportDir, not + // handled by MatchFile above. + continue + } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } + pkgName = f.Name.Name + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports = append(exports, name) + } + } + } + + if env.Logf != nil { + sortedExports := append([]string(nil), exports...) + sort.Strings(sortedExports) + env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) + } + return pkgName, exports, nil +} + +// findImport searches for a package with the given symbols. +// If no package is found, findImport returns ("", false, nil) +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { + // Sort the candidates by their import package length, + // assuming that shorter package names are better than long + // ones. Note that this sorts by the de-vendored name, so + // there's no "penalty" for vendoring. + sort.Sort(byDistanceOrImportPathShortLength(candidates)) + if pass.env.Logf != nil { + for i, c := range candidates { + pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + } + } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } + + // Collect exports for packages with matching names. + rescv := make([]chan *pkg, len(candidates)) + for i := range candidates { + rescv[i] = make(chan *pkg, 1) + } + const maxConcurrentPackageImport = 4 + loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + + ctx, cancel := context.WithCancel(ctx) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + for i, c := range candidates { + select { + case loadExportsSem <- struct{}{}: + case <-ctx.Done(): + return + } + + wg.Add(1) + go func(c pkgDistance, resc chan<- *pkg) { + defer func() { + <-loadExportsSem + wg.Done() + }() + + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + } + // If we're an x_test, load the package under test's test variant. 
+ includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) + if err != nil { + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + } + resc <- nil + return + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym] = true + } + + // If it doesn't have the right + // symbols, send nil to mean no match. + for symbol := range symbols { + if !exportsMap[symbol] { + resc <- nil + return + } + } + resc <- c.pkg + }(c, rescv[i]) + } + }() + + for _, resc := range rescv { + pkg := <-resc + if pkg == nil { + continue + } + return pkg, nil + } + return nil, nil +} + +// pkgIsCandidate reports whether pkg is a candidate for satisfying the +// finding which package pkgIdent in the file named by filename is trying +// to refer to. +// +// This check is purely lexical and is meant to be as fast as possible +// because it's run over all $GOPATH directories to filter out poor +// candidates in order to limit the CPU and I/O later parsing the +// exports in candidate packages. +// +// filename is the file being formatted. +// pkgIdent is the package being searched for, like "client" (if +// searching for "client.New") +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { + // Check "internal" and "vendor" visibility: + if !canUse(filename, pkg.dir) { + return false + } + + // Speed optimization to minimize disk I/O: + // the last two components on disk must contain the + // package name somewhere. + // + // This permits mismatch naming like directory + // "go-foo" being package "foo", or "pkg.v3" being "pkg", + // or directory "google.golang.org/api/cloudbilling/v1" + // being package "cloudbilling", but doesn't + // permit a directory "foo" to be package + // "bar", which is strongly discouraged + // anyway. There's no reason goimports needs + // to be slow just to accommodate that. + for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } + } + return false +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// canUse reports whether the package in dir is usable from filename, +// respecting the Go "internal" and "vendor" visibility rules. +func canUse(filename, dir string) bool { + // Fast path check, before any allocations. If it doesn't contain vendor + // or internal, it's not tricky: + // Note that this can false-negative on directories like "notinternal", + // but we check it correctly below. This is just a fast path. 
+ if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. + absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func copyExports(pkg []string) map[string]bool { + m := make(map[string]bool, len(pkg)) + for _, v := range pkg { + m[v] = true + } + return m +} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go new file mode 100644 index 00000000..2815edc3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -0,0 +1,346 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkstdlib.go + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" +) + +// Options is golang.org/x/tools/imports.Options with extra internal-only options. +type Options struct { + Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. 
+ LocalPrefix string + + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. +func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + if !opt.FormatOnly { + if err := fixImports(fileSet, file, filename, opt.Env); err != nil { + return nil, err + } + } + return formatFile(fileSet, file, src, adjust, opt) +} + +// FixImports returns a list of fixes to the imports that, when applied, +// will leave the imports in the same state as Process. src and opt must +// be specified. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + fileSet := token.NewFileSet() + file, _, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + return getFixes(fileSet, file, filename, opt.Env) +} + +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { + // Don't use parse() -- we don't care about fragments or statement lists + // here, and we need to work with unparseable files. + fileSet := token.NewFileSet() + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + parserMode |= extraMode + + file, err := parser.ParseFile(fileSet, filename, src, parserMode) + if file == nil { + return nil, err + } + + // Apply the fixes to the file. + apply(fileSet, file, fixes) + + return formatFile(fileSet, file, src, nil, opt) +} + +func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { + mergeImports(fileSet, file) + sortImports(opt.LocalPrefix, fileSet, file) + imps := astutil.Imports(fileSet, file) + var spacesBefore []string // import paths we need spaces before + for _, impSection := range imps { + // Within each block of contiguous imports, see if any + // import lines are in different group numbers. If so, + // we'll need to put a space between them so it's + // compatible with gofmt. 
+ lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(opt.LocalPrefix, importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err := printConfig.Fprint(&buf, fileSet, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out, err = addImportSpaces(bytes.NewReader(out), spacesBefore) + if err != nil { + return nil, err + } + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. +func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that parse errors are on + // the correct line. + const prefix = "package main;" + psrc := append([]byte(prefix), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // Gofmt will turn the ; into a \n. + // Do that ourselves now and update the file contents, + // so that positions and line numbers are correct going forward. + psrc[len(prefix)-1] = '\n' + fset.File(file.Package).SetLinesForContent(psrc) + + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + src = src[len(prefix):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. 
+ // Remove that indent. + src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. + return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2) matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3) matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. +func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`) + +func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) { + var out bytes.Buffer + in := bufio.NewReader(r) + inImports := false + done := false + for { + s, err := in.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprint(&out, s) + } + return out.Bytes(), nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go new file mode 100644 index 00000000..664fbbf5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -0,0 +1,659 @@ +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" +) + +// ModuleResolver implements resolver for modules using the go command as little +// as feasible. 
+type ModuleResolver struct { + env *ProcessEnv + moduleCacheDir string + dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool + + initialized bool + main *gocommand.ModuleJSON + modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*gocommand.ModuleJSON // ...or Dir. + + // moduleCacheCache stores information about the module cache. + moduleCacheCache *dirInfoCache + otherCache *dirInfoCache +} + +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *ModuleResolver) init() error { + if r.initialized { + return nil + } + + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return err + } + + if mainMod != nil && vendorEnabled { + // Vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.main = mainMod + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainMod.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + } else { + // Vendor mode is off, so run go list -m ... to find everything. + r.initAllMods() + } + + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.gopath())[0], "/pkg/mod") + } + + sort.Slice(r.modsByModPath, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByModPath[x].Path, "/") + } + return count(j) < count(i) // descending order + }) + sort.Slice(r.modsByDir, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByDir[x].Dir, "/") + } + return count(j) < count(i) // descending order + }) + + r.roots = []gopathwalk.Root{ + {filepath.Join(r.env.goroot(), "/src"), gopathwalk.RootGOROOT}, + } + if r.main != nil { + r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + } else { + addDep := func(mod *gocommand.ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. 
+ for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} + if r.moduleCacheCache == nil { + r.moduleCacheCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + } + if r.otherCache == nil { + r.otherCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + } + r.initialized = true + return nil +} + +func (r *ModuleResolver) initAllMods() error { + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...") + if err != nil { + return err + } + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &gocommand.ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return err + } + if mod.Dir == "" { + if r.env.Logf != nil { + r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) + } + // Can't do anything with a module that's not downloaded. + continue + } + // golang/go#36193: the go command doesn't always clean paths. + mod.Dir = filepath.Clean(mod.Dir) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) + if mod.Main { + r.main = mod + } + } + return nil +} + +func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} + r.otherCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.scanSema <- struct{}{} +} + +func (r *ModuleResolver) ClearForNewMod() { + <-r.scanSema + *r = ModuleResolver{ + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, + } + r.init() + r.scanSema <- struct{}{} +} + +// findPackage returns the module and directory that contains the package at +// the given import path, or returns nil, "" if no module is in scope. +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { + // This can't find packages in the stdlib, but that's harmless for all + // the existing code paths. + for _, m := range r.modsByModPath { + if !strings.HasPrefix(importPath, m.Path) { + continue + } + pathInModule := importPath[len(m.Path):] + pkgDir := filepath.Join(m.Dir, pathInModule) + if r.dirIsNestedModule(pkgDir, m) { + continue + } + + if info, ok := r.cacheLoad(pkgDir); ok { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + if err != nil { + continue // No package in this dir. + } + return m, pkgDir + } + if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil { + continue // Dir is unreadable, etc. + } + // This is slightly wrong: a directory doesn't have to have an + // importable package to count as a package for package-to-module + // resolution. package main or _test files should count but + // don't. + // TODO(heschi): fix this. + if _, err := r.cachePackageName(info); err == nil { + return m, pkgDir + } + } + + // Not cached. Read the filesystem. + pkgFiles, err := ioutil.ReadDir(pkgDir) + if err != nil { + continue + } + // A module only contains a package if it has buildable go + // files in that directory. If not, it could be provided by an + // outer module. See #29736. 
+ for _, fi := range pkgFiles { + if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok { + return m, pkgDir + } + } + } + return nil, "" +} + +func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) { + if info, ok := r.moduleCacheCache.Load(dir); ok { + return info, ok + } + return r.otherCache.Load(dir) +} + +func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { + if info.rootType == gopathwalk.RootModuleCache { + r.moduleCacheCache.Store(info.dir, info) + } else { + r.otherCache.Store(info.dir, info) + } +} + +func (r *ModuleResolver) cacheKeys() []string { + return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) +} + +// cachePackageName caches the package name for a dir already in the cache. +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CachePackageName(info) + } + return r.otherCache.CachePackageName(info) +} + +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CacheExports(ctx, env, info) + } + return r.otherCache.CacheExports(ctx, env, info) +} + +// findModuleByDir returns the module that contains dir, or nil if no such +// module is in scope. +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { + // This is quite tricky and may not be correct. dir could be: + // - a package in the main module. + // - a replace target underneath the main module's directory. + // - a nested module in the above. + // - a replace target somewhere totally random. + // - a nested module in the above. + // - in the mod cache. + // - in /vendor/ in -mod=vendor mode. + // - nested module? Dunno. + // Rumor has it that replace targets cannot contain other replace targets. + for _, m := range r.modsByDir { + if !strings.HasPrefix(dir, m.Dir) { + continue + } + + if r.dirIsNestedModule(dir, m) { + continue + } + + return m + } + return nil +} + +// dirIsNestedModule reports if dir is contained in a nested module underneath +// mod, not actually in mod. +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { + if !strings.HasPrefix(dir, mod.Dir) { + return false + } + if r.dirInModuleCache(dir) { + // Nested modules in the module cache are pruned, + // so it cannot be a nested module. + return false + } + if mod != nil && mod == r.dummyVendorMod { + // The /vendor pseudomodule is flattened and doesn't actually count. 
+ return false + } + modDir, _ := r.modInfo(dir) + if modDir == "" { + return false + } + return modDir != mod.Dir +} + +func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { + readModName := func(modFile string) string { + modBytes, err := ioutil.ReadFile(modFile) + if err != nil { + return "" + } + return modulePath(modBytes) + } + + if r.dirInModuleCache(dir) { + matches := modCacheRegexp.FindStringSubmatch(dir) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } + for { + if info, ok := r.cacheLoad(dir); ok { + return info.moduleDir, info.moduleName + } + f := filepath.Join(dir, "go.mod") + info, err := os.Stat(f) + if err == nil && !info.IsDir() { + return dir, readModName(f) + } + + d := filepath.Dir(dir) + if len(d) >= len(dir) { + return "", "" // reached top of file system, no go.mod + } + dir = d + } +} + +func (r *ModuleResolver) dirInModuleCache(dir string) bool { + if r.moduleCacheDir == "" { + return false + } + return strings.HasPrefix(dir, r.moduleCacheDir) +} + +func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + if err := r.init(); err != nil { + return nil, err + } + names := map[string]string{} + for _, path := range importPaths { + _, packageDir := r.findPackage(path) + if packageDir == "" { + continue + } + name, err := packageDirToName(packageDir) + if err != nil { + continue + } + names[path] = name + } + return names, nil +} + +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + if err := r.init(); err != nil { + return err + } + + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } + + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() + + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. + skip := func(root gopathwalk.Root, dir string) bool { + info, ok := r.cacheLoad(dir) + if !ok { + return false + } + // This directory can be skipped as long as we have already scanned it. + // Packages with errors will continue to have errors, so there is no need + // to rescan them. + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + // Add anything new to the cache, and process it if we're still listening. + add := func(root gopathwalk.Root, dir string) { + r.cacheStore(r.scanDirForPackage(root, dir)) + } + + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. 
Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } + + if r.scannedRoots[root] { + continue + } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) + r.scannedRoots[root] = true + } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} + +func modRelevance(mod *gocommand.ModuleJSON) int { + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + return MaxRelevance - 3 + case !mod.Main: + return MaxRelevance - 2 + default: + return MaxRelevance - 1 // main module ties with stdlib + } +} + +// canonicalize gets the result of canonicalizing the packages using the results +// of initializing the resolver from 'go list -m'. +func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { + // Packages in GOROOT are already canonical, regardless of the std/cmd modules. + if info.rootType == gopathwalk.RootGOROOT { + return &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + packageName: path.Base(info.nonCanonicalImportPath), + relevance: MaxRelevance, + }, nil + } + + importPath := info.nonCanonicalImportPath + mod := r.findModuleByDir(info.dir) + // Check if the directory is underneath a module that's in scope. + if mod != nil { + // It is. If dir is the target of a replace directive, + // our guessed import path is wrong. Use the real one. + if mod.Dir == info.dir { + importPath = mod.Path + } else { + dirInMod := info.dir[len(mod.Dir)+len("/"):] + importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) + } + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. + return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) + } + + res := &pkg{ + importPathShort: importPath, + dir: info.dir, + relevance: modRelevance(mod), + } + // We may have discovered a package that has a different version + // in scope already. Canonicalize to that one if possible. + if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { + res.dir = canonicalDir + } + return res, nil +} + +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if err := r.init(); err != nil { + return "", nil, err + } + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { + return r.cacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + importPath := filepath.ToSlash(subdir) + if strings.HasPrefix(importPath, "vendor/") { + // Only enter vendor directories if they're explicitly requested as a root. 
+ return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("unwanted vendor directory"), + } + } + switch root.Type { + case gopathwalk.RootCurrentModule: + importPath = path.Join(r.main.Path, filepath.ToSlash(subdir)) + case gopathwalk.RootModuleCache: + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if r.env.Logf != nil { + r.env.Logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath = path.Join(modPath, filepath.ToSlash(matches[3])) + } + + modDir, modName := r.modInfo(dir) + result := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + if root.Type == gopathwalk.RootGOROOT { + // stdlib packages are always in scope, despite the confusing go.mod + return result + } + return result +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// modulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +// +// Copied from cmd/go/internal/modfile. +func modulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go new file mode 100644 index 00000000..5b4f03ac --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -0,0 +1,232 @@ +package imports + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/tools/internal/gopathwalk" +) + +// To find packages to import, the resolver needs to know about all of the +// the packages that could be imported. This includes packages that are +// already in modules that are in (1) the current module, (2) replace targets, +// and (3) packages in the module cache. Packages in (1) and (2) may change over +// time, as the client may edit the current module and locally replaced modules. +// The module cache (which includes all of the packages in (3)) can only +// ever be added to. +// +// The resolver can thus save state about packages in the module cache +// and guarantee that this will not change over time. To obtain information +// about new modules added to the module cache, the module cache should be +// rescanned. 
+// +// It is OK to serve information about modules that have been deleted, +// as they do still exist. +// TODO(suzmue): can we share information with the caller about +// what module needs to be downloaded to import this package? + +type directoryPackageStatus int + +const ( + _ directoryPackageStatus = iota + directoryScanned + nameLoaded + exportsLoaded +) + +type directoryPackageInfo struct { + // status indicates the extent to which this struct has been filled in. + status directoryPackageStatus + // err is non-nil when there was an error trying to reach status. + err error + + // Set when status >= directoryScanned. + + // dir is the absolute directory of this package. + dir string + rootType gopathwalk.RootType + // nonCanonicalImportPath is the package's expected import path. It may + // not actually be importable at that path. + nonCanonicalImportPath string + + // Module-related information. + moduleDir string // The directory that is the module root of this dir. + moduleName string // The module name that contains this dir. + + // Set when status >= nameLoaded. + + packageName string // the package name, as declared in the source. + + // Set when status >= exportsLoaded. + + exports []string +} + +// reachedStatus returns true when info has a status at least target and any error associated with +// an attempt to reach target. +func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) { + if info.err == nil { + return info.status >= target, nil + } + if info.status == target { + return true, info.err + } + return true, nil +} + +// dirInfoCache is a concurrency safe map for storing information about +// directories that may contain packages. +// +// The information in this cache is built incrementally. Entries are initialized in scan. +// No new keys should be added in any other functions, as all directories containing +// packages are identified in scan. +// +// Other functions, including loadExports and findPackage, may update entries in this cache +// as they discover new things about the directory. +// +// The information in the cache is not expected to change for the cache's +// lifetime, so there is no protection against competing writes. Users should +// take care not to hold the cache across changes to the underlying files. +// +// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) +type dirInfoCache struct { + mu sync.Mutex + // dirs stores information about packages in directories, keyed by absolute path. + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. + const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. + + // We can't hold mu while calling the listener. 
+ d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop +} + +// Store stores the package info for dir. +func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { + d.mu.Lock() + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } +} + +// Load returns a copy of the directoryPackageInfo for absolute directory dir. +func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { + d.mu.Lock() + defer d.mu.Unlock() + info, ok := d.dirs[dir] + if !ok { + return directoryPackageInfo{}, false + } + return *info, true +} + +// Keys returns the keys currently present in d. +func (d *dirInfoCache) Keys() (keys []string) { + d.mu.Lock() + defer d.mu.Unlock() + for key := range d.dirs { + keys = append(keys, key) + } + return keys +} + +func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + return info.packageName, err + } + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return "", fmt.Errorf("cannot read package name, scan error: %v", err) + } + info.packageName, info.err = packageDirToName(info.dir) + info.status = nameLoaded + d.Store(info.dir, info) + return info.packageName, info.err +} + +func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { + if reached, _ := info.reachedStatus(exportsLoaded); reached { + return info.packageName, info.exports, info.err + } + if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { + return "", nil, err + } + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { + return info.packageName, info.exports, info.err + } + // The cache structure wants things to proceed linearly. We can skip a + // step here, but only if we succeed. + if info.status == nameLoaded || info.err == nil { + info.status = exportsLoaded + } else { + info.status = nameLoaded + } + d.Store(info.dir, info) + return info.packageName, info.exports, info.err +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go new file mode 100644 index 00000000..be8ffa25 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -0,0 +1,280 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hacked up copy of go/ast/import.go + +package imports + +import ( + "go/ast" + "go/token" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. 
+// It also removes duplicate imports when it is possible to do so without data loss. +func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) + d.Specs = specs + + // Deduping can leave a blank line before the rparen; clean that up. + if len(d.Specs) > 0 { + lastSpec := d.Specs[len(d.Specs)-1] + lastLine := fset.Position(lastSpec.Pos()).Line + if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 { + fset.File(d.Rparen).MergeLine(rParenLine - 1) + } + } + } +} + +// mergeImports merges all the import declarations into the first one. +// Taken from golang.org/x/tools/ast/astutil. +func mergeImports(fset *token.FileSet, f *ast.File) { + if len(f.Decls) <= 1 { + return + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } +} + +// declImports reports whether gen contains an import of path. +// Taken from golang.org/x/tools/ast/astutil. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +func importPath(s ast.Spec) string { + t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s ast.Spec) string { + n := s.(*ast.ImportSpec).Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s ast.Spec) string { + c := s.(*ast.ImportSpec).Comment + if c == nil { + return "" + } + return c.Text() +} + +// collapse indicates whether prev may be removed, leaving only next. 
+func collapse(prev, next ast.Spec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + return prev.(*ast.ImportSpec).Comment == nil +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Identify comments in this range. + // Any comment from pos[0].Start to the final line counts. + lastLine := fset.Position(pos[len(pos)-1].End).Line + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if fset.Position(g.End()).Line > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. + importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec{localPrefix, specs}) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + fset.File(p).MergeLine(fset.Position(p).Line) + } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + nextSpecPos := pos[i].End + + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + nextSpecPos = c.End() + } + } + if i < len(specs)-1 { + pos[i+1].Start = nextSpecPos + pos[i+1].End = nextSpecPos + } + } + + sort.Sort(byCommentPos(comments)) + + // Fixup comments can insert blank lines, because import specs are on different lines. + // We remove those blank lines here by merging import spec to the first import spec line. 
+ firstSpecLine := fset.Position(specs[0].Pos()).Line + for _, s := range specs[1:] { + p := s.Pos() + line := fset.File(p).Line(p) + for previousLine := line - 1; previousLine >= firstSpecLine; { + fset.File(p).MergeLine(previousLine) + previousLine-- + } + } + return specs +} + +type byImportSpec struct { + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec +} + +func (x byImportSpec) Len() int { return len(x.specs) } +func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x.specs[i]) + jpath := importPath(x.specs[j]) + + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x.specs[i]) + jname := importName(x.specs[j]) + + if iname != jname { + return iname < jname + } + return importComment(x.specs[i]) < importComment(x.specs[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go new file mode 100644 index 00000000..16252111 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -0,0 +1,10464 @@ +// Code generated by mkstdlib.go. DO NOT EDIT. + +package imports + +var stdlib = map[string][]string{ + "archive/tar": []string{ + "ErrFieldTooLong", + "ErrHeader", + "ErrWriteAfterClose", + "ErrWriteTooLong", + "FileInfoHeader", + "Format", + "FormatGNU", + "FormatPAX", + "FormatUSTAR", + "FormatUnknown", + "Header", + "NewReader", + "NewWriter", + "Reader", + "TypeBlock", + "TypeChar", + "TypeCont", + "TypeDir", + "TypeFifo", + "TypeGNULongLink", + "TypeGNULongName", + "TypeGNUSparse", + "TypeLink", + "TypeReg", + "TypeRegA", + "TypeSymlink", + "TypeXGlobalHeader", + "TypeXHeader", + "Writer", + }, + "archive/zip": []string{ + "Compressor", + "Decompressor", + "Deflate", + "ErrAlgorithm", + "ErrChecksum", + "ErrFormat", + "File", + "FileHeader", + "FileInfoHeader", + "NewReader", + "NewWriter", + "OpenReader", + "ReadCloser", + "Reader", + "RegisterCompressor", + "RegisterDecompressor", + "Store", + "Writer", + }, + "bufio": []string{ + "ErrAdvanceTooFar", + "ErrBufferFull", + "ErrFinalToken", + "ErrInvalidUnreadByte", + "ErrInvalidUnreadRune", + "ErrNegativeAdvance", + "ErrNegativeCount", + "ErrTooLong", + "MaxScanTokenSize", + "NewReadWriter", + "NewReader", + "NewReaderSize", + "NewScanner", + "NewWriter", + "NewWriterSize", + "ReadWriter", + "Reader", + "ScanBytes", + "ScanLines", + "ScanRunes", + "ScanWords", + "Scanner", + "SplitFunc", + "Writer", + }, + "bytes": []string{ + "Buffer", + "Compare", + "Contains", + "ContainsAny", + "ContainsRune", + "Count", + "Equal", + "EqualFold", + "ErrTooLarge", + "Fields", + "FieldsFunc", + "HasPrefix", + "HasSuffix", + "Index", + "IndexAny", + "IndexByte", + "IndexFunc", + "IndexRune", + "Join", + "LastIndex", + "LastIndexAny", + "LastIndexByte", + "LastIndexFunc", + "Map", + "MinRead", + "NewBuffer", + "NewBufferString", + "NewReader", + "Reader", + "Repeat", + "Replace", + "ReplaceAll", + "Runes", + "Split", + "SplitAfter", + "SplitAfterN", + "SplitN", + "Title", + "ToLower", + "ToLowerSpecial", + "ToTitle", + "ToTitleSpecial", + "ToUpper", + 
"ToUpperSpecial", + "ToValidUTF8", + "Trim", + "TrimFunc", + "TrimLeft", + "TrimLeftFunc", + "TrimPrefix", + "TrimRight", + "TrimRightFunc", + "TrimSpace", + "TrimSuffix", + }, + "compress/bzip2": []string{ + "NewReader", + "StructuralError", + }, + "compress/flate": []string{ + "BestCompression", + "BestSpeed", + "CorruptInputError", + "DefaultCompression", + "HuffmanOnly", + "InternalError", + "NewReader", + "NewReaderDict", + "NewWriter", + "NewWriterDict", + "NoCompression", + "ReadError", + "Reader", + "Resetter", + "WriteError", + "Writer", + }, + "compress/gzip": []string{ + "BestCompression", + "BestSpeed", + "DefaultCompression", + "ErrChecksum", + "ErrHeader", + "Header", + "HuffmanOnly", + "NewReader", + "NewWriter", + "NewWriterLevel", + "NoCompression", + "Reader", + "Writer", + }, + "compress/lzw": []string{ + "LSB", + "MSB", + "NewReader", + "NewWriter", + "Order", + }, + "compress/zlib": []string{ + "BestCompression", + "BestSpeed", + "DefaultCompression", + "ErrChecksum", + "ErrDictionary", + "ErrHeader", + "HuffmanOnly", + "NewReader", + "NewReaderDict", + "NewWriter", + "NewWriterLevel", + "NewWriterLevelDict", + "NoCompression", + "Resetter", + "Writer", + }, + "container/heap": []string{ + "Fix", + "Init", + "Interface", + "Pop", + "Push", + "Remove", + }, + "container/list": []string{ + "Element", + "List", + "New", + }, + "container/ring": []string{ + "New", + "Ring", + }, + "context": []string{ + "Background", + "CancelFunc", + "Canceled", + "Context", + "DeadlineExceeded", + "TODO", + "WithCancel", + "WithDeadline", + "WithTimeout", + "WithValue", + }, + "crypto": []string{ + "BLAKE2b_256", + "BLAKE2b_384", + "BLAKE2b_512", + "BLAKE2s_256", + "Decrypter", + "DecrypterOpts", + "Hash", + "MD4", + "MD5", + "MD5SHA1", + "PrivateKey", + "PublicKey", + "RIPEMD160", + "RegisterHash", + "SHA1", + "SHA224", + "SHA256", + "SHA384", + "SHA3_224", + "SHA3_256", + "SHA3_384", + "SHA3_512", + "SHA512", + "SHA512_224", + "SHA512_256", + "Signer", + "SignerOpts", + }, + "crypto/aes": []string{ + "BlockSize", + "KeySizeError", + "NewCipher", + }, + "crypto/cipher": []string{ + "AEAD", + "Block", + "BlockMode", + "NewCBCDecrypter", + "NewCBCEncrypter", + "NewCFBDecrypter", + "NewCFBEncrypter", + "NewCTR", + "NewGCM", + "NewGCMWithNonceSize", + "NewGCMWithTagSize", + "NewOFB", + "Stream", + "StreamReader", + "StreamWriter", + }, + "crypto/des": []string{ + "BlockSize", + "KeySizeError", + "NewCipher", + "NewTripleDESCipher", + }, + "crypto/dsa": []string{ + "ErrInvalidPublicKey", + "GenerateKey", + "GenerateParameters", + "L1024N160", + "L2048N224", + "L2048N256", + "L3072N256", + "ParameterSizes", + "Parameters", + "PrivateKey", + "PublicKey", + "Sign", + "Verify", + }, + "crypto/ecdsa": []string{ + "GenerateKey", + "PrivateKey", + "PublicKey", + "Sign", + "Verify", + }, + "crypto/ed25519": []string{ + "GenerateKey", + "NewKeyFromSeed", + "PrivateKey", + "PrivateKeySize", + "PublicKey", + "PublicKeySize", + "SeedSize", + "Sign", + "SignatureSize", + "Verify", + }, + "crypto/elliptic": []string{ + "Curve", + "CurveParams", + "GenerateKey", + "Marshal", + "P224", + "P256", + "P384", + "P521", + "Unmarshal", + }, + "crypto/hmac": []string{ + "Equal", + "New", + }, + "crypto/md5": []string{ + "BlockSize", + "New", + "Size", + "Sum", + }, + "crypto/rand": []string{ + "Int", + "Prime", + "Read", + "Reader", + }, + "crypto/rc4": []string{ + "Cipher", + "KeySizeError", + "NewCipher", + }, + "crypto/rsa": []string{ + "CRTValue", + "DecryptOAEP", + "DecryptPKCS1v15", + 
"DecryptPKCS1v15SessionKey", + "EncryptOAEP", + "EncryptPKCS1v15", + "ErrDecryption", + "ErrMessageTooLong", + "ErrVerification", + "GenerateKey", + "GenerateMultiPrimeKey", + "OAEPOptions", + "PKCS1v15DecryptOptions", + "PSSOptions", + "PSSSaltLengthAuto", + "PSSSaltLengthEqualsHash", + "PrecomputedValues", + "PrivateKey", + "PublicKey", + "SignPKCS1v15", + "SignPSS", + "VerifyPKCS1v15", + "VerifyPSS", + }, + "crypto/sha1": []string{ + "BlockSize", + "New", + "Size", + "Sum", + }, + "crypto/sha256": []string{ + "BlockSize", + "New", + "New224", + "Size", + "Size224", + "Sum224", + "Sum256", + }, + "crypto/sha512": []string{ + "BlockSize", + "New", + "New384", + "New512_224", + "New512_256", + "Size", + "Size224", + "Size256", + "Size384", + "Sum384", + "Sum512", + "Sum512_224", + "Sum512_256", + }, + "crypto/subtle": []string{ + "ConstantTimeByteEq", + "ConstantTimeCompare", + "ConstantTimeCopy", + "ConstantTimeEq", + "ConstantTimeLessOrEq", + "ConstantTimeSelect", + }, + "crypto/tls": []string{ + "Certificate", + "CertificateRequestInfo", + "CipherSuite", + "CipherSuiteName", + "CipherSuites", + "Client", + "ClientAuthType", + "ClientHelloInfo", + "ClientSessionCache", + "ClientSessionState", + "Config", + "Conn", + "ConnectionState", + "CurveID", + "CurveP256", + "CurveP384", + "CurveP521", + "Dial", + "DialWithDialer", + "ECDSAWithP256AndSHA256", + "ECDSAWithP384AndSHA384", + "ECDSAWithP521AndSHA512", + "ECDSAWithSHA1", + "Ed25519", + "InsecureCipherSuites", + "Listen", + "LoadX509KeyPair", + "NewLRUClientSessionCache", + "NewListener", + "NoClientCert", + "PKCS1WithSHA1", + "PKCS1WithSHA256", + "PKCS1WithSHA384", + "PKCS1WithSHA512", + "PSSWithSHA256", + "PSSWithSHA384", + "PSSWithSHA512", + "RecordHeaderError", + "RenegotiateFreelyAsClient", + "RenegotiateNever", + "RenegotiateOnceAsClient", + "RenegotiationSupport", + "RequestClientCert", + "RequireAndVerifyClientCert", + "RequireAnyClientCert", + "Server", + "SignatureScheme", + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + "TLS_FALLBACK_SCSV", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_RC4_128_SHA", + "VerifyClientCertIfGiven", + "VersionSSL30", + "VersionTLS10", + "VersionTLS11", + "VersionTLS12", + "VersionTLS13", + "X25519", + "X509KeyPair", + }, + "crypto/x509": []string{ + "CANotAuthorizedForExtKeyUsage", + "CANotAuthorizedForThisName", + "CertPool", + "Certificate", + "CertificateInvalidError", + "CertificateRequest", + "ConstraintViolationError", + "CreateCertificate", + "CreateCertificateRequest", + "DSA", + "DSAWithSHA1", + "DSAWithSHA256", + "DecryptPEMBlock", + "ECDSA", + 
"ECDSAWithSHA1", + "ECDSAWithSHA256", + "ECDSAWithSHA384", + "ECDSAWithSHA512", + "Ed25519", + "EncryptPEMBlock", + "ErrUnsupportedAlgorithm", + "Expired", + "ExtKeyUsage", + "ExtKeyUsageAny", + "ExtKeyUsageClientAuth", + "ExtKeyUsageCodeSigning", + "ExtKeyUsageEmailProtection", + "ExtKeyUsageIPSECEndSystem", + "ExtKeyUsageIPSECTunnel", + "ExtKeyUsageIPSECUser", + "ExtKeyUsageMicrosoftCommercialCodeSigning", + "ExtKeyUsageMicrosoftKernelCodeSigning", + "ExtKeyUsageMicrosoftServerGatedCrypto", + "ExtKeyUsageNetscapeServerGatedCrypto", + "ExtKeyUsageOCSPSigning", + "ExtKeyUsageServerAuth", + "ExtKeyUsageTimeStamping", + "HostnameError", + "IncompatibleUsage", + "IncorrectPasswordError", + "InsecureAlgorithmError", + "InvalidReason", + "IsEncryptedPEMBlock", + "KeyUsage", + "KeyUsageCRLSign", + "KeyUsageCertSign", + "KeyUsageContentCommitment", + "KeyUsageDataEncipherment", + "KeyUsageDecipherOnly", + "KeyUsageDigitalSignature", + "KeyUsageEncipherOnly", + "KeyUsageKeyAgreement", + "KeyUsageKeyEncipherment", + "MD2WithRSA", + "MD5WithRSA", + "MarshalECPrivateKey", + "MarshalPKCS1PrivateKey", + "MarshalPKCS1PublicKey", + "MarshalPKCS8PrivateKey", + "MarshalPKIXPublicKey", + "NameConstraintsWithoutSANs", + "NameMismatch", + "NewCertPool", + "NotAuthorizedToSign", + "PEMCipher", + "PEMCipher3DES", + "PEMCipherAES128", + "PEMCipherAES192", + "PEMCipherAES256", + "PEMCipherDES", + "ParseCRL", + "ParseCertificate", + "ParseCertificateRequest", + "ParseCertificates", + "ParseDERCRL", + "ParseECPrivateKey", + "ParsePKCS1PrivateKey", + "ParsePKCS1PublicKey", + "ParsePKCS8PrivateKey", + "ParsePKIXPublicKey", + "PublicKeyAlgorithm", + "PureEd25519", + "RSA", + "SHA1WithRSA", + "SHA256WithRSA", + "SHA256WithRSAPSS", + "SHA384WithRSA", + "SHA384WithRSAPSS", + "SHA512WithRSA", + "SHA512WithRSAPSS", + "SignatureAlgorithm", + "SystemCertPool", + "SystemRootsError", + "TooManyConstraints", + "TooManyIntermediates", + "UnconstrainedName", + "UnhandledCriticalExtension", + "UnknownAuthorityError", + "UnknownPublicKeyAlgorithm", + "UnknownSignatureAlgorithm", + "VerifyOptions", + }, + "crypto/x509/pkix": []string{ + "AlgorithmIdentifier", + "AttributeTypeAndValue", + "AttributeTypeAndValueSET", + "CertificateList", + "Extension", + "Name", + "RDNSequence", + "RelativeDistinguishedNameSET", + "RevokedCertificate", + "TBSCertificateList", + }, + "database/sql": []string{ + "ColumnType", + "Conn", + "DB", + "DBStats", + "Drivers", + "ErrConnDone", + "ErrNoRows", + "ErrTxDone", + "IsolationLevel", + "LevelDefault", + "LevelLinearizable", + "LevelReadCommitted", + "LevelReadUncommitted", + "LevelRepeatableRead", + "LevelSerializable", + "LevelSnapshot", + "LevelWriteCommitted", + "Named", + "NamedArg", + "NullBool", + "NullFloat64", + "NullInt32", + "NullInt64", + "NullString", + "NullTime", + "Open", + "OpenDB", + "Out", + "RawBytes", + "Register", + "Result", + "Row", + "Rows", + "Scanner", + "Stmt", + "Tx", + "TxOptions", + }, + "database/sql/driver": []string{ + "Bool", + "ColumnConverter", + "Conn", + "ConnBeginTx", + "ConnPrepareContext", + "Connector", + "DefaultParameterConverter", + "Driver", + "DriverContext", + "ErrBadConn", + "ErrRemoveArgument", + "ErrSkip", + "Execer", + "ExecerContext", + "Int32", + "IsScanValue", + "IsValue", + "IsolationLevel", + "NamedValue", + "NamedValueChecker", + "NotNull", + "Null", + "Pinger", + "Queryer", + "QueryerContext", + "Result", + "ResultNoRows", + "Rows", + "RowsAffected", + "RowsColumnTypeDatabaseTypeName", + "RowsColumnTypeLength", + "RowsColumnTypeNullable", + 
"RowsColumnTypePrecisionScale", + "RowsColumnTypeScanType", + "RowsNextResultSet", + "SessionResetter", + "Stmt", + "StmtExecContext", + "StmtQueryContext", + "String", + "Tx", + "TxOptions", + "Value", + "ValueConverter", + "Valuer", + }, + "debug/dwarf": []string{ + "AddrType", + "ArrayType", + "Attr", + "AttrAbstractOrigin", + "AttrAccessibility", + "AttrAddrBase", + "AttrAddrClass", + "AttrAlignment", + "AttrAllocated", + "AttrArtificial", + "AttrAssociated", + "AttrBaseTypes", + "AttrBinaryScale", + "AttrBitOffset", + "AttrBitSize", + "AttrByteSize", + "AttrCallAllCalls", + "AttrCallAllSourceCalls", + "AttrCallAllTailCalls", + "AttrCallColumn", + "AttrCallDataLocation", + "AttrCallDataValue", + "AttrCallFile", + "AttrCallLine", + "AttrCallOrigin", + "AttrCallPC", + "AttrCallParameter", + "AttrCallReturnPC", + "AttrCallTailCall", + "AttrCallTarget", + "AttrCallTargetClobbered", + "AttrCallValue", + "AttrCalling", + "AttrCommonRef", + "AttrCompDir", + "AttrConstExpr", + "AttrConstValue", + "AttrContainingType", + "AttrCount", + "AttrDataBitOffset", + "AttrDataLocation", + "AttrDataMemberLoc", + "AttrDecimalScale", + "AttrDecimalSign", + "AttrDeclColumn", + "AttrDeclFile", + "AttrDeclLine", + "AttrDeclaration", + "AttrDefaultValue", + "AttrDefaulted", + "AttrDeleted", + "AttrDescription", + "AttrDigitCount", + "AttrDiscr", + "AttrDiscrList", + "AttrDiscrValue", + "AttrDwoName", + "AttrElemental", + "AttrEncoding", + "AttrEndianity", + "AttrEntrypc", + "AttrEnumClass", + "AttrExplicit", + "AttrExportSymbols", + "AttrExtension", + "AttrExternal", + "AttrFrameBase", + "AttrFriend", + "AttrHighpc", + "AttrIdentifierCase", + "AttrImport", + "AttrInline", + "AttrIsOptional", + "AttrLanguage", + "AttrLinkageName", + "AttrLocation", + "AttrLoclistsBase", + "AttrLowerBound", + "AttrLowpc", + "AttrMacroInfo", + "AttrMacros", + "AttrMainSubprogram", + "AttrMutable", + "AttrName", + "AttrNamelistItem", + "AttrNoreturn", + "AttrObjectPointer", + "AttrOrdering", + "AttrPictureString", + "AttrPriority", + "AttrProducer", + "AttrPrototyped", + "AttrPure", + "AttrRanges", + "AttrRank", + "AttrRecursive", + "AttrReference", + "AttrReturnAddr", + "AttrRnglistsBase", + "AttrRvalueReference", + "AttrSegment", + "AttrSibling", + "AttrSignature", + "AttrSmall", + "AttrSpecification", + "AttrStartScope", + "AttrStaticLink", + "AttrStmtList", + "AttrStrOffsetsBase", + "AttrStride", + "AttrStrideSize", + "AttrStringLength", + "AttrStringLengthBitSize", + "AttrStringLengthByteSize", + "AttrThreadsScaled", + "AttrTrampoline", + "AttrType", + "AttrUpperBound", + "AttrUseLocation", + "AttrUseUTF8", + "AttrVarParam", + "AttrVirtuality", + "AttrVisibility", + "AttrVtableElemLoc", + "BasicType", + "BoolType", + "CharType", + "Class", + "ClassAddrPtr", + "ClassAddress", + "ClassBlock", + "ClassConstant", + "ClassExprLoc", + "ClassFlag", + "ClassLinePtr", + "ClassLocList", + "ClassLocListPtr", + "ClassMacPtr", + "ClassRangeListPtr", + "ClassReference", + "ClassReferenceAlt", + "ClassReferenceSig", + "ClassRngList", + "ClassRngListsPtr", + "ClassStrOffsetsPtr", + "ClassString", + "ClassStringAlt", + "ClassUnknown", + "CommonType", + "ComplexType", + "Data", + "DecodeError", + "DotDotDotType", + "Entry", + "EnumType", + "EnumValue", + "ErrUnknownPC", + "Field", + "FloatType", + "FuncType", + "IntType", + "LineEntry", + "LineFile", + "LineReader", + "LineReaderPos", + "New", + "Offset", + "PtrType", + "QualType", + "Reader", + "StructField", + "StructType", + "Tag", + "TagAccessDeclaration", + "TagArrayType", + 
"TagAtomicType", + "TagBaseType", + "TagCallSite", + "TagCallSiteParameter", + "TagCatchDwarfBlock", + "TagClassType", + "TagCoarrayType", + "TagCommonDwarfBlock", + "TagCommonInclusion", + "TagCompileUnit", + "TagCondition", + "TagConstType", + "TagConstant", + "TagDwarfProcedure", + "TagDynamicType", + "TagEntryPoint", + "TagEnumerationType", + "TagEnumerator", + "TagFileType", + "TagFormalParameter", + "TagFriend", + "TagGenericSubrange", + "TagImmutableType", + "TagImportedDeclaration", + "TagImportedModule", + "TagImportedUnit", + "TagInheritance", + "TagInlinedSubroutine", + "TagInterfaceType", + "TagLabel", + "TagLexDwarfBlock", + "TagMember", + "TagModule", + "TagMutableType", + "TagNamelist", + "TagNamelistItem", + "TagNamespace", + "TagPackedType", + "TagPartialUnit", + "TagPointerType", + "TagPtrToMemberType", + "TagReferenceType", + "TagRestrictType", + "TagRvalueReferenceType", + "TagSetType", + "TagSharedType", + "TagSkeletonUnit", + "TagStringType", + "TagStructType", + "TagSubprogram", + "TagSubrangeType", + "TagSubroutineType", + "TagTemplateAlias", + "TagTemplateTypeParameter", + "TagTemplateValueParameter", + "TagThrownType", + "TagTryDwarfBlock", + "TagTypeUnit", + "TagTypedef", + "TagUnionType", + "TagUnspecifiedParameters", + "TagUnspecifiedType", + "TagVariable", + "TagVariant", + "TagVariantPart", + "TagVolatileType", + "TagWithStmt", + "Type", + "TypedefType", + "UcharType", + "UintType", + "UnspecifiedType", + "UnsupportedType", + "VoidType", + }, + "debug/elf": []string{ + "ARM_MAGIC_TRAMP_NUMBER", + "COMPRESS_HIOS", + "COMPRESS_HIPROC", + "COMPRESS_LOOS", + "COMPRESS_LOPROC", + "COMPRESS_ZLIB", + "Chdr32", + "Chdr64", + "Class", + "CompressionType", + "DF_BIND_NOW", + "DF_ORIGIN", + "DF_STATIC_TLS", + "DF_SYMBOLIC", + "DF_TEXTREL", + "DT_BIND_NOW", + "DT_DEBUG", + "DT_ENCODING", + "DT_FINI", + "DT_FINI_ARRAY", + "DT_FINI_ARRAYSZ", + "DT_FLAGS", + "DT_HASH", + "DT_HIOS", + "DT_HIPROC", + "DT_INIT", + "DT_INIT_ARRAY", + "DT_INIT_ARRAYSZ", + "DT_JMPREL", + "DT_LOOS", + "DT_LOPROC", + "DT_NEEDED", + "DT_NULL", + "DT_PLTGOT", + "DT_PLTREL", + "DT_PLTRELSZ", + "DT_PREINIT_ARRAY", + "DT_PREINIT_ARRAYSZ", + "DT_REL", + "DT_RELA", + "DT_RELAENT", + "DT_RELASZ", + "DT_RELENT", + "DT_RELSZ", + "DT_RPATH", + "DT_RUNPATH", + "DT_SONAME", + "DT_STRSZ", + "DT_STRTAB", + "DT_SYMBOLIC", + "DT_SYMENT", + "DT_SYMTAB", + "DT_TEXTREL", + "DT_VERNEED", + "DT_VERNEEDNUM", + "DT_VERSYM", + "Data", + "Dyn32", + "Dyn64", + "DynFlag", + "DynTag", + "EI_ABIVERSION", + "EI_CLASS", + "EI_DATA", + "EI_NIDENT", + "EI_OSABI", + "EI_PAD", + "EI_VERSION", + "ELFCLASS32", + "ELFCLASS64", + "ELFCLASSNONE", + "ELFDATA2LSB", + "ELFDATA2MSB", + "ELFDATANONE", + "ELFMAG", + "ELFOSABI_86OPEN", + "ELFOSABI_AIX", + "ELFOSABI_ARM", + "ELFOSABI_AROS", + "ELFOSABI_CLOUDABI", + "ELFOSABI_FENIXOS", + "ELFOSABI_FREEBSD", + "ELFOSABI_HPUX", + "ELFOSABI_HURD", + "ELFOSABI_IRIX", + "ELFOSABI_LINUX", + "ELFOSABI_MODESTO", + "ELFOSABI_NETBSD", + "ELFOSABI_NONE", + "ELFOSABI_NSK", + "ELFOSABI_OPENBSD", + "ELFOSABI_OPENVMS", + "ELFOSABI_SOLARIS", + "ELFOSABI_STANDALONE", + "ELFOSABI_TRU64", + "EM_386", + "EM_486", + "EM_56800EX", + "EM_68HC05", + "EM_68HC08", + "EM_68HC11", + "EM_68HC12", + "EM_68HC16", + "EM_68K", + "EM_78KOR", + "EM_8051", + "EM_860", + "EM_88K", + "EM_960", + "EM_AARCH64", + "EM_ALPHA", + "EM_ALPHA_STD", + "EM_ALTERA_NIOS2", + "EM_AMDGPU", + "EM_ARC", + "EM_ARCA", + "EM_ARC_COMPACT", + "EM_ARC_COMPACT2", + "EM_ARM", + "EM_AVR", + "EM_AVR32", + "EM_BA1", + "EM_BA2", + "EM_BLACKFIN", + "EM_BPF", + 
"EM_C166", + "EM_CDP", + "EM_CE", + "EM_CLOUDSHIELD", + "EM_COGE", + "EM_COLDFIRE", + "EM_COOL", + "EM_COREA_1ST", + "EM_COREA_2ND", + "EM_CR", + "EM_CR16", + "EM_CRAYNV2", + "EM_CRIS", + "EM_CRX", + "EM_CSR_KALIMBA", + "EM_CUDA", + "EM_CYPRESS_M8C", + "EM_D10V", + "EM_D30V", + "EM_DSP24", + "EM_DSPIC30F", + "EM_DXP", + "EM_ECOG1", + "EM_ECOG16", + "EM_ECOG1X", + "EM_ECOG2", + "EM_ETPU", + "EM_EXCESS", + "EM_F2MC16", + "EM_FIREPATH", + "EM_FR20", + "EM_FR30", + "EM_FT32", + "EM_FX66", + "EM_H8S", + "EM_H8_300", + "EM_H8_300H", + "EM_H8_500", + "EM_HUANY", + "EM_IA_64", + "EM_INTEL205", + "EM_INTEL206", + "EM_INTEL207", + "EM_INTEL208", + "EM_INTEL209", + "EM_IP2K", + "EM_JAVELIN", + "EM_K10M", + "EM_KM32", + "EM_KMX16", + "EM_KMX32", + "EM_KMX8", + "EM_KVARC", + "EM_L10M", + "EM_LANAI", + "EM_LATTICEMICO32", + "EM_M16C", + "EM_M32", + "EM_M32C", + "EM_M32R", + "EM_MANIK", + "EM_MAX", + "EM_MAXQ30", + "EM_MCHP_PIC", + "EM_MCST_ELBRUS", + "EM_ME16", + "EM_METAG", + "EM_MICROBLAZE", + "EM_MIPS", + "EM_MIPS_RS3_LE", + "EM_MIPS_RS4_BE", + "EM_MIPS_X", + "EM_MMA", + "EM_MMDSP_PLUS", + "EM_MMIX", + "EM_MN10200", + "EM_MN10300", + "EM_MOXIE", + "EM_MSP430", + "EM_NCPU", + "EM_NDR1", + "EM_NDS32", + "EM_NONE", + "EM_NORC", + "EM_NS32K", + "EM_OPEN8", + "EM_OPENRISC", + "EM_PARISC", + "EM_PCP", + "EM_PDP10", + "EM_PDP11", + "EM_PDSP", + "EM_PJ", + "EM_PPC", + "EM_PPC64", + "EM_PRISM", + "EM_QDSP6", + "EM_R32C", + "EM_RCE", + "EM_RH32", + "EM_RISCV", + "EM_RL78", + "EM_RS08", + "EM_RX", + "EM_S370", + "EM_S390", + "EM_SCORE7", + "EM_SEP", + "EM_SE_C17", + "EM_SE_C33", + "EM_SH", + "EM_SHARC", + "EM_SLE9X", + "EM_SNP1K", + "EM_SPARC", + "EM_SPARC32PLUS", + "EM_SPARCV9", + "EM_ST100", + "EM_ST19", + "EM_ST200", + "EM_ST7", + "EM_ST9PLUS", + "EM_STARCORE", + "EM_STM8", + "EM_STXP7X", + "EM_SVX", + "EM_TILE64", + "EM_TILEGX", + "EM_TILEPRO", + "EM_TINYJ", + "EM_TI_ARP32", + "EM_TI_C2000", + "EM_TI_C5500", + "EM_TI_C6000", + "EM_TI_PRU", + "EM_TMM_GPP", + "EM_TPC", + "EM_TRICORE", + "EM_TRIMEDIA", + "EM_TSK3000", + "EM_UNICORE", + "EM_V800", + "EM_V850", + "EM_VAX", + "EM_VIDEOCORE", + "EM_VIDEOCORE3", + "EM_VIDEOCORE5", + "EM_VISIUM", + "EM_VPP500", + "EM_X86_64", + "EM_XCORE", + "EM_XGATE", + "EM_XIMO16", + "EM_XTENSA", + "EM_Z80", + "EM_ZSP", + "ET_CORE", + "ET_DYN", + "ET_EXEC", + "ET_HIOS", + "ET_HIPROC", + "ET_LOOS", + "ET_LOPROC", + "ET_NONE", + "ET_REL", + "EV_CURRENT", + "EV_NONE", + "ErrNoSymbols", + "File", + "FileHeader", + "FormatError", + "Header32", + "Header64", + "ImportedSymbol", + "Machine", + "NT_FPREGSET", + "NT_PRPSINFO", + "NT_PRSTATUS", + "NType", + "NewFile", + "OSABI", + "Open", + "PF_MASKOS", + "PF_MASKPROC", + "PF_R", + "PF_W", + "PF_X", + "PT_DYNAMIC", + "PT_HIOS", + "PT_HIPROC", + "PT_INTERP", + "PT_LOAD", + "PT_LOOS", + "PT_LOPROC", + "PT_NOTE", + "PT_NULL", + "PT_PHDR", + "PT_SHLIB", + "PT_TLS", + "Prog", + "Prog32", + "Prog64", + "ProgFlag", + "ProgHeader", + "ProgType", + "R_386", + "R_386_16", + "R_386_32", + "R_386_32PLT", + "R_386_8", + "R_386_COPY", + "R_386_GLOB_DAT", + "R_386_GOT32", + "R_386_GOT32X", + "R_386_GOTOFF", + "R_386_GOTPC", + "R_386_IRELATIVE", + "R_386_JMP_SLOT", + "R_386_NONE", + "R_386_PC16", + "R_386_PC32", + "R_386_PC8", + "R_386_PLT32", + "R_386_RELATIVE", + "R_386_SIZE32", + "R_386_TLS_DESC", + "R_386_TLS_DESC_CALL", + "R_386_TLS_DTPMOD32", + "R_386_TLS_DTPOFF32", + "R_386_TLS_GD", + "R_386_TLS_GD_32", + "R_386_TLS_GD_CALL", + "R_386_TLS_GD_POP", + "R_386_TLS_GD_PUSH", + "R_386_TLS_GOTDESC", + "R_386_TLS_GOTIE", + "R_386_TLS_IE", + 
"R_386_TLS_IE_32", + "R_386_TLS_LDM", + "R_386_TLS_LDM_32", + "R_386_TLS_LDM_CALL", + "R_386_TLS_LDM_POP", + "R_386_TLS_LDM_PUSH", + "R_386_TLS_LDO_32", + "R_386_TLS_LE", + "R_386_TLS_LE_32", + "R_386_TLS_TPOFF", + "R_386_TLS_TPOFF32", + "R_390", + "R_390_12", + "R_390_16", + "R_390_20", + "R_390_32", + "R_390_64", + "R_390_8", + "R_390_COPY", + "R_390_GLOB_DAT", + "R_390_GOT12", + "R_390_GOT16", + "R_390_GOT20", + "R_390_GOT32", + "R_390_GOT64", + "R_390_GOTENT", + "R_390_GOTOFF", + "R_390_GOTOFF16", + "R_390_GOTOFF64", + "R_390_GOTPC", + "R_390_GOTPCDBL", + "R_390_GOTPLT12", + "R_390_GOTPLT16", + "R_390_GOTPLT20", + "R_390_GOTPLT32", + "R_390_GOTPLT64", + "R_390_GOTPLTENT", + "R_390_GOTPLTOFF16", + "R_390_GOTPLTOFF32", + "R_390_GOTPLTOFF64", + "R_390_JMP_SLOT", + "R_390_NONE", + "R_390_PC16", + "R_390_PC16DBL", + "R_390_PC32", + "R_390_PC32DBL", + "R_390_PC64", + "R_390_PLT16DBL", + "R_390_PLT32", + "R_390_PLT32DBL", + "R_390_PLT64", + "R_390_RELATIVE", + "R_390_TLS_DTPMOD", + "R_390_TLS_DTPOFF", + "R_390_TLS_GD32", + "R_390_TLS_GD64", + "R_390_TLS_GDCALL", + "R_390_TLS_GOTIE12", + "R_390_TLS_GOTIE20", + "R_390_TLS_GOTIE32", + "R_390_TLS_GOTIE64", + "R_390_TLS_IE32", + "R_390_TLS_IE64", + "R_390_TLS_IEENT", + "R_390_TLS_LDCALL", + "R_390_TLS_LDM32", + "R_390_TLS_LDM64", + "R_390_TLS_LDO32", + "R_390_TLS_LDO64", + "R_390_TLS_LE32", + "R_390_TLS_LE64", + "R_390_TLS_LOAD", + "R_390_TLS_TPOFF", + "R_AARCH64", + "R_AARCH64_ABS16", + "R_AARCH64_ABS32", + "R_AARCH64_ABS64", + "R_AARCH64_ADD_ABS_LO12_NC", + "R_AARCH64_ADR_GOT_PAGE", + "R_AARCH64_ADR_PREL_LO21", + "R_AARCH64_ADR_PREL_PG_HI21", + "R_AARCH64_ADR_PREL_PG_HI21_NC", + "R_AARCH64_CALL26", + "R_AARCH64_CONDBR19", + "R_AARCH64_COPY", + "R_AARCH64_GLOB_DAT", + "R_AARCH64_GOT_LD_PREL19", + "R_AARCH64_IRELATIVE", + "R_AARCH64_JUMP26", + "R_AARCH64_JUMP_SLOT", + "R_AARCH64_LD64_GOTOFF_LO15", + "R_AARCH64_LD64_GOTPAGE_LO15", + "R_AARCH64_LD64_GOT_LO12_NC", + "R_AARCH64_LDST128_ABS_LO12_NC", + "R_AARCH64_LDST16_ABS_LO12_NC", + "R_AARCH64_LDST32_ABS_LO12_NC", + "R_AARCH64_LDST64_ABS_LO12_NC", + "R_AARCH64_LDST8_ABS_LO12_NC", + "R_AARCH64_LD_PREL_LO19", + "R_AARCH64_MOVW_SABS_G0", + "R_AARCH64_MOVW_SABS_G1", + "R_AARCH64_MOVW_SABS_G2", + "R_AARCH64_MOVW_UABS_G0", + "R_AARCH64_MOVW_UABS_G0_NC", + "R_AARCH64_MOVW_UABS_G1", + "R_AARCH64_MOVW_UABS_G1_NC", + "R_AARCH64_MOVW_UABS_G2", + "R_AARCH64_MOVW_UABS_G2_NC", + "R_AARCH64_MOVW_UABS_G3", + "R_AARCH64_NONE", + "R_AARCH64_NULL", + "R_AARCH64_P32_ABS16", + "R_AARCH64_P32_ABS32", + "R_AARCH64_P32_ADD_ABS_LO12_NC", + "R_AARCH64_P32_ADR_GOT_PAGE", + "R_AARCH64_P32_ADR_PREL_LO21", + "R_AARCH64_P32_ADR_PREL_PG_HI21", + "R_AARCH64_P32_CALL26", + "R_AARCH64_P32_CONDBR19", + "R_AARCH64_P32_COPY", + "R_AARCH64_P32_GLOB_DAT", + "R_AARCH64_P32_GOT_LD_PREL19", + "R_AARCH64_P32_IRELATIVE", + "R_AARCH64_P32_JUMP26", + "R_AARCH64_P32_JUMP_SLOT", + "R_AARCH64_P32_LD32_GOT_LO12_NC", + "R_AARCH64_P32_LDST128_ABS_LO12_NC", + "R_AARCH64_P32_LDST16_ABS_LO12_NC", + "R_AARCH64_P32_LDST32_ABS_LO12_NC", + "R_AARCH64_P32_LDST64_ABS_LO12_NC", + "R_AARCH64_P32_LDST8_ABS_LO12_NC", + "R_AARCH64_P32_LD_PREL_LO19", + "R_AARCH64_P32_MOVW_SABS_G0", + "R_AARCH64_P32_MOVW_UABS_G0", + "R_AARCH64_P32_MOVW_UABS_G0_NC", + "R_AARCH64_P32_MOVW_UABS_G1", + "R_AARCH64_P32_PREL16", + "R_AARCH64_P32_PREL32", + "R_AARCH64_P32_RELATIVE", + "R_AARCH64_P32_TLSDESC", + "R_AARCH64_P32_TLSDESC_ADD_LO12_NC", + "R_AARCH64_P32_TLSDESC_ADR_PAGE21", + "R_AARCH64_P32_TLSDESC_ADR_PREL21", + "R_AARCH64_P32_TLSDESC_CALL", + 
"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", + "R_AARCH64_P32_TLSDESC_LD_PREL19", + "R_AARCH64_P32_TLSGD_ADD_LO12_NC", + "R_AARCH64_P32_TLSGD_ADR_PAGE21", + "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", + "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", + "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", + "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", + "R_AARCH64_P32_TLS_DTPMOD", + "R_AARCH64_P32_TLS_DTPREL", + "R_AARCH64_P32_TLS_TPREL", + "R_AARCH64_P32_TSTBR14", + "R_AARCH64_PREL16", + "R_AARCH64_PREL32", + "R_AARCH64_PREL64", + "R_AARCH64_RELATIVE", + "R_AARCH64_TLSDESC", + "R_AARCH64_TLSDESC_ADD", + "R_AARCH64_TLSDESC_ADD_LO12_NC", + "R_AARCH64_TLSDESC_ADR_PAGE21", + "R_AARCH64_TLSDESC_ADR_PREL21", + "R_AARCH64_TLSDESC_CALL", + "R_AARCH64_TLSDESC_LD64_LO12_NC", + "R_AARCH64_TLSDESC_LDR", + "R_AARCH64_TLSDESC_LD_PREL19", + "R_AARCH64_TLSDESC_OFF_G0_NC", + "R_AARCH64_TLSDESC_OFF_G1", + "R_AARCH64_TLSGD_ADD_LO12_NC", + "R_AARCH64_TLSGD_ADR_PAGE21", + "R_AARCH64_TLSGD_ADR_PREL21", + "R_AARCH64_TLSGD_MOVW_G0_NC", + "R_AARCH64_TLSGD_MOVW_G1", + "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", + "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", + "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", + "R_AARCH64_TLSLD_ADR_PAGE21", + "R_AARCH64_TLSLD_ADR_PREL21", + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12", + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", + "R_AARCH64_TLSLE_ADD_TPREL_HI12", + "R_AARCH64_TLSLE_ADD_TPREL_LO12", + "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", + "R_AARCH64_TLSLE_LDST128_TPREL_LO12", + "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G0", + "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G1", + "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G2", + "R_AARCH64_TLS_DTPMOD64", + "R_AARCH64_TLS_DTPREL64", + "R_AARCH64_TLS_TPREL64", + "R_AARCH64_TSTBR14", + "R_ALPHA", + "R_ALPHA_BRADDR", + "R_ALPHA_COPY", + "R_ALPHA_GLOB_DAT", + "R_ALPHA_GPDISP", + "R_ALPHA_GPREL32", + "R_ALPHA_GPRELHIGH", + "R_ALPHA_GPRELLOW", + "R_ALPHA_GPVALUE", + "R_ALPHA_HINT", + "R_ALPHA_IMMED_BR_HI32", + "R_ALPHA_IMMED_GP_16", + "R_ALPHA_IMMED_GP_HI32", + "R_ALPHA_IMMED_LO32", + "R_ALPHA_IMMED_SCN_HI32", + "R_ALPHA_JMP_SLOT", + "R_ALPHA_LITERAL", + "R_ALPHA_LITUSE", + "R_ALPHA_NONE", + "R_ALPHA_OP_PRSHIFT", + "R_ALPHA_OP_PSUB", + "R_ALPHA_OP_PUSH", + "R_ALPHA_OP_STORE", + "R_ALPHA_REFLONG", + "R_ALPHA_REFQUAD", + "R_ALPHA_RELATIVE", + "R_ALPHA_SREL16", + "R_ALPHA_SREL32", + "R_ALPHA_SREL64", + "R_ARM", + "R_ARM_ABS12", + "R_ARM_ABS16", + "R_ARM_ABS32", + "R_ARM_ABS32_NOI", + "R_ARM_ABS8", + "R_ARM_ALU_PCREL_15_8", + "R_ARM_ALU_PCREL_23_15", + "R_ARM_ALU_PCREL_7_0", + "R_ARM_ALU_PC_G0", + "R_ARM_ALU_PC_G0_NC", + "R_ARM_ALU_PC_G1", + "R_ARM_ALU_PC_G1_NC", + "R_ARM_ALU_PC_G2", + "R_ARM_ALU_SBREL_19_12_NC", + "R_ARM_ALU_SBREL_27_20_CK", + "R_ARM_ALU_SB_G0", + "R_ARM_ALU_SB_G0_NC", + "R_ARM_ALU_SB_G1", + "R_ARM_ALU_SB_G1_NC", + "R_ARM_ALU_SB_G2", + "R_ARM_AMP_VCALL9", + "R_ARM_BASE_ABS", + "R_ARM_CALL", + "R_ARM_COPY", + "R_ARM_GLOB_DAT", + "R_ARM_GNU_VTENTRY", + "R_ARM_GNU_VTINHERIT", + "R_ARM_GOT32", + "R_ARM_GOTOFF", + "R_ARM_GOTOFF12", + "R_ARM_GOTPC", + "R_ARM_GOTRELAX", + "R_ARM_GOT_ABS", + "R_ARM_GOT_BREL12", + "R_ARM_GOT_PREL", + "R_ARM_IRELATIVE", + "R_ARM_JUMP24", + "R_ARM_JUMP_SLOT", + "R_ARM_LDC_PC_G0", + "R_ARM_LDC_PC_G1", + 
"R_ARM_LDC_PC_G2", + "R_ARM_LDC_SB_G0", + "R_ARM_LDC_SB_G1", + "R_ARM_LDC_SB_G2", + "R_ARM_LDRS_PC_G0", + "R_ARM_LDRS_PC_G1", + "R_ARM_LDRS_PC_G2", + "R_ARM_LDRS_SB_G0", + "R_ARM_LDRS_SB_G1", + "R_ARM_LDRS_SB_G2", + "R_ARM_LDR_PC_G1", + "R_ARM_LDR_PC_G2", + "R_ARM_LDR_SBREL_11_10_NC", + "R_ARM_LDR_SB_G0", + "R_ARM_LDR_SB_G1", + "R_ARM_LDR_SB_G2", + "R_ARM_ME_TOO", + "R_ARM_MOVT_ABS", + "R_ARM_MOVT_BREL", + "R_ARM_MOVT_PREL", + "R_ARM_MOVW_ABS_NC", + "R_ARM_MOVW_BREL", + "R_ARM_MOVW_BREL_NC", + "R_ARM_MOVW_PREL_NC", + "R_ARM_NONE", + "R_ARM_PC13", + "R_ARM_PC24", + "R_ARM_PLT32", + "R_ARM_PLT32_ABS", + "R_ARM_PREL31", + "R_ARM_PRIVATE_0", + "R_ARM_PRIVATE_1", + "R_ARM_PRIVATE_10", + "R_ARM_PRIVATE_11", + "R_ARM_PRIVATE_12", + "R_ARM_PRIVATE_13", + "R_ARM_PRIVATE_14", + "R_ARM_PRIVATE_15", + "R_ARM_PRIVATE_2", + "R_ARM_PRIVATE_3", + "R_ARM_PRIVATE_4", + "R_ARM_PRIVATE_5", + "R_ARM_PRIVATE_6", + "R_ARM_PRIVATE_7", + "R_ARM_PRIVATE_8", + "R_ARM_PRIVATE_9", + "R_ARM_RABS32", + "R_ARM_RBASE", + "R_ARM_REL32", + "R_ARM_REL32_NOI", + "R_ARM_RELATIVE", + "R_ARM_RPC24", + "R_ARM_RREL32", + "R_ARM_RSBREL32", + "R_ARM_RXPC25", + "R_ARM_SBREL31", + "R_ARM_SBREL32", + "R_ARM_SWI24", + "R_ARM_TARGET1", + "R_ARM_TARGET2", + "R_ARM_THM_ABS5", + "R_ARM_THM_ALU_ABS_G0_NC", + "R_ARM_THM_ALU_ABS_G1_NC", + "R_ARM_THM_ALU_ABS_G2_NC", + "R_ARM_THM_ALU_ABS_G3", + "R_ARM_THM_ALU_PREL_11_0", + "R_ARM_THM_GOT_BREL12", + "R_ARM_THM_JUMP11", + "R_ARM_THM_JUMP19", + "R_ARM_THM_JUMP24", + "R_ARM_THM_JUMP6", + "R_ARM_THM_JUMP8", + "R_ARM_THM_MOVT_ABS", + "R_ARM_THM_MOVT_BREL", + "R_ARM_THM_MOVT_PREL", + "R_ARM_THM_MOVW_ABS_NC", + "R_ARM_THM_MOVW_BREL", + "R_ARM_THM_MOVW_BREL_NC", + "R_ARM_THM_MOVW_PREL_NC", + "R_ARM_THM_PC12", + "R_ARM_THM_PC22", + "R_ARM_THM_PC8", + "R_ARM_THM_RPC22", + "R_ARM_THM_SWI8", + "R_ARM_THM_TLS_CALL", + "R_ARM_THM_TLS_DESCSEQ16", + "R_ARM_THM_TLS_DESCSEQ32", + "R_ARM_THM_XPC22", + "R_ARM_TLS_CALL", + "R_ARM_TLS_DESCSEQ", + "R_ARM_TLS_DTPMOD32", + "R_ARM_TLS_DTPOFF32", + "R_ARM_TLS_GD32", + "R_ARM_TLS_GOTDESC", + "R_ARM_TLS_IE12GP", + "R_ARM_TLS_IE32", + "R_ARM_TLS_LDM32", + "R_ARM_TLS_LDO12", + "R_ARM_TLS_LDO32", + "R_ARM_TLS_LE12", + "R_ARM_TLS_LE32", + "R_ARM_TLS_TPOFF32", + "R_ARM_V4BX", + "R_ARM_XPC25", + "R_INFO", + "R_INFO32", + "R_MIPS", + "R_MIPS_16", + "R_MIPS_26", + "R_MIPS_32", + "R_MIPS_64", + "R_MIPS_ADD_IMMEDIATE", + "R_MIPS_CALL16", + "R_MIPS_CALL_HI16", + "R_MIPS_CALL_LO16", + "R_MIPS_DELETE", + "R_MIPS_GOT16", + "R_MIPS_GOT_DISP", + "R_MIPS_GOT_HI16", + "R_MIPS_GOT_LO16", + "R_MIPS_GOT_OFST", + "R_MIPS_GOT_PAGE", + "R_MIPS_GPREL16", + "R_MIPS_GPREL32", + "R_MIPS_HI16", + "R_MIPS_HIGHER", + "R_MIPS_HIGHEST", + "R_MIPS_INSERT_A", + "R_MIPS_INSERT_B", + "R_MIPS_JALR", + "R_MIPS_LITERAL", + "R_MIPS_LO16", + "R_MIPS_NONE", + "R_MIPS_PC16", + "R_MIPS_PJUMP", + "R_MIPS_REL16", + "R_MIPS_REL32", + "R_MIPS_RELGOT", + "R_MIPS_SCN_DISP", + "R_MIPS_SHIFT5", + "R_MIPS_SHIFT6", + "R_MIPS_SUB", + "R_MIPS_TLS_DTPMOD32", + "R_MIPS_TLS_DTPMOD64", + "R_MIPS_TLS_DTPREL32", + "R_MIPS_TLS_DTPREL64", + "R_MIPS_TLS_DTPREL_HI16", + "R_MIPS_TLS_DTPREL_LO16", + "R_MIPS_TLS_GD", + "R_MIPS_TLS_GOTTPREL", + "R_MIPS_TLS_LDM", + "R_MIPS_TLS_TPREL32", + "R_MIPS_TLS_TPREL64", + "R_MIPS_TLS_TPREL_HI16", + "R_MIPS_TLS_TPREL_LO16", + "R_PPC", + "R_PPC64", + "R_PPC64_ADDR14", + "R_PPC64_ADDR14_BRNTAKEN", + "R_PPC64_ADDR14_BRTAKEN", + "R_PPC64_ADDR16", + "R_PPC64_ADDR16_DS", + "R_PPC64_ADDR16_HA", + "R_PPC64_ADDR16_HI", + "R_PPC64_ADDR16_HIGH", + "R_PPC64_ADDR16_HIGHA", + "R_PPC64_ADDR16_HIGHER", + 
"R_PPC64_ADDR16_HIGHERA", + "R_PPC64_ADDR16_HIGHEST", + "R_PPC64_ADDR16_HIGHESTA", + "R_PPC64_ADDR16_LO", + "R_PPC64_ADDR16_LO_DS", + "R_PPC64_ADDR24", + "R_PPC64_ADDR32", + "R_PPC64_ADDR64", + "R_PPC64_ADDR64_LOCAL", + "R_PPC64_DTPMOD64", + "R_PPC64_DTPREL16", + "R_PPC64_DTPREL16_DS", + "R_PPC64_DTPREL16_HA", + "R_PPC64_DTPREL16_HI", + "R_PPC64_DTPREL16_HIGH", + "R_PPC64_DTPREL16_HIGHA", + "R_PPC64_DTPREL16_HIGHER", + "R_PPC64_DTPREL16_HIGHERA", + "R_PPC64_DTPREL16_HIGHEST", + "R_PPC64_DTPREL16_HIGHESTA", + "R_PPC64_DTPREL16_LO", + "R_PPC64_DTPREL16_LO_DS", + "R_PPC64_DTPREL64", + "R_PPC64_ENTRY", + "R_PPC64_GOT16", + "R_PPC64_GOT16_DS", + "R_PPC64_GOT16_HA", + "R_PPC64_GOT16_HI", + "R_PPC64_GOT16_LO", + "R_PPC64_GOT16_LO_DS", + "R_PPC64_GOT_DTPREL16_DS", + "R_PPC64_GOT_DTPREL16_HA", + "R_PPC64_GOT_DTPREL16_HI", + "R_PPC64_GOT_DTPREL16_LO_DS", + "R_PPC64_GOT_TLSGD16", + "R_PPC64_GOT_TLSGD16_HA", + "R_PPC64_GOT_TLSGD16_HI", + "R_PPC64_GOT_TLSGD16_LO", + "R_PPC64_GOT_TLSLD16", + "R_PPC64_GOT_TLSLD16_HA", + "R_PPC64_GOT_TLSLD16_HI", + "R_PPC64_GOT_TLSLD16_LO", + "R_PPC64_GOT_TPREL16_DS", + "R_PPC64_GOT_TPREL16_HA", + "R_PPC64_GOT_TPREL16_HI", + "R_PPC64_GOT_TPREL16_LO_DS", + "R_PPC64_IRELATIVE", + "R_PPC64_JMP_IREL", + "R_PPC64_JMP_SLOT", + "R_PPC64_NONE", + "R_PPC64_PLT16_LO_DS", + "R_PPC64_PLTGOT16", + "R_PPC64_PLTGOT16_DS", + "R_PPC64_PLTGOT16_HA", + "R_PPC64_PLTGOT16_HI", + "R_PPC64_PLTGOT16_LO", + "R_PPC64_PLTGOT_LO_DS", + "R_PPC64_REL14", + "R_PPC64_REL14_BRNTAKEN", + "R_PPC64_REL14_BRTAKEN", + "R_PPC64_REL16", + "R_PPC64_REL16DX_HA", + "R_PPC64_REL16_HA", + "R_PPC64_REL16_HI", + "R_PPC64_REL16_LO", + "R_PPC64_REL24", + "R_PPC64_REL24_NOTOC", + "R_PPC64_REL32", + "R_PPC64_REL64", + "R_PPC64_SECTOFF_DS", + "R_PPC64_SECTOFF_LO_DS", + "R_PPC64_TLS", + "R_PPC64_TLSGD", + "R_PPC64_TLSLD", + "R_PPC64_TOC", + "R_PPC64_TOC16", + "R_PPC64_TOC16_DS", + "R_PPC64_TOC16_HA", + "R_PPC64_TOC16_HI", + "R_PPC64_TOC16_LO", + "R_PPC64_TOC16_LO_DS", + "R_PPC64_TOCSAVE", + "R_PPC64_TPREL16", + "R_PPC64_TPREL16_DS", + "R_PPC64_TPREL16_HA", + "R_PPC64_TPREL16_HI", + "R_PPC64_TPREL16_HIGH", + "R_PPC64_TPREL16_HIGHA", + "R_PPC64_TPREL16_HIGHER", + "R_PPC64_TPREL16_HIGHERA", + "R_PPC64_TPREL16_HIGHEST", + "R_PPC64_TPREL16_HIGHESTA", + "R_PPC64_TPREL16_LO", + "R_PPC64_TPREL16_LO_DS", + "R_PPC64_TPREL64", + "R_PPC_ADDR14", + "R_PPC_ADDR14_BRNTAKEN", + "R_PPC_ADDR14_BRTAKEN", + "R_PPC_ADDR16", + "R_PPC_ADDR16_HA", + "R_PPC_ADDR16_HI", + "R_PPC_ADDR16_LO", + "R_PPC_ADDR24", + "R_PPC_ADDR32", + "R_PPC_COPY", + "R_PPC_DTPMOD32", + "R_PPC_DTPREL16", + "R_PPC_DTPREL16_HA", + "R_PPC_DTPREL16_HI", + "R_PPC_DTPREL16_LO", + "R_PPC_DTPREL32", + "R_PPC_EMB_BIT_FLD", + "R_PPC_EMB_MRKREF", + "R_PPC_EMB_NADDR16", + "R_PPC_EMB_NADDR16_HA", + "R_PPC_EMB_NADDR16_HI", + "R_PPC_EMB_NADDR16_LO", + "R_PPC_EMB_NADDR32", + "R_PPC_EMB_RELSDA", + "R_PPC_EMB_RELSEC16", + "R_PPC_EMB_RELST_HA", + "R_PPC_EMB_RELST_HI", + "R_PPC_EMB_RELST_LO", + "R_PPC_EMB_SDA21", + "R_PPC_EMB_SDA2I16", + "R_PPC_EMB_SDA2REL", + "R_PPC_EMB_SDAI16", + "R_PPC_GLOB_DAT", + "R_PPC_GOT16", + "R_PPC_GOT16_HA", + "R_PPC_GOT16_HI", + "R_PPC_GOT16_LO", + "R_PPC_GOT_TLSGD16", + "R_PPC_GOT_TLSGD16_HA", + "R_PPC_GOT_TLSGD16_HI", + "R_PPC_GOT_TLSGD16_LO", + "R_PPC_GOT_TLSLD16", + "R_PPC_GOT_TLSLD16_HA", + "R_PPC_GOT_TLSLD16_HI", + "R_PPC_GOT_TLSLD16_LO", + "R_PPC_GOT_TPREL16", + "R_PPC_GOT_TPREL16_HA", + "R_PPC_GOT_TPREL16_HI", + "R_PPC_GOT_TPREL16_LO", + "R_PPC_JMP_SLOT", + "R_PPC_LOCAL24PC", + "R_PPC_NONE", + "R_PPC_PLT16_HA", + "R_PPC_PLT16_HI", + "R_PPC_PLT16_LO", + 
"R_PPC_PLT32", + "R_PPC_PLTREL24", + "R_PPC_PLTREL32", + "R_PPC_REL14", + "R_PPC_REL14_BRNTAKEN", + "R_PPC_REL14_BRTAKEN", + "R_PPC_REL24", + "R_PPC_REL32", + "R_PPC_RELATIVE", + "R_PPC_SDAREL16", + "R_PPC_SECTOFF", + "R_PPC_SECTOFF_HA", + "R_PPC_SECTOFF_HI", + "R_PPC_SECTOFF_LO", + "R_PPC_TLS", + "R_PPC_TPREL16", + "R_PPC_TPREL16_HA", + "R_PPC_TPREL16_HI", + "R_PPC_TPREL16_LO", + "R_PPC_TPREL32", + "R_PPC_UADDR16", + "R_PPC_UADDR32", + "R_RISCV", + "R_RISCV_32", + "R_RISCV_32_PCREL", + "R_RISCV_64", + "R_RISCV_ADD16", + "R_RISCV_ADD32", + "R_RISCV_ADD64", + "R_RISCV_ADD8", + "R_RISCV_ALIGN", + "R_RISCV_BRANCH", + "R_RISCV_CALL", + "R_RISCV_CALL_PLT", + "R_RISCV_COPY", + "R_RISCV_GNU_VTENTRY", + "R_RISCV_GNU_VTINHERIT", + "R_RISCV_GOT_HI20", + "R_RISCV_GPREL_I", + "R_RISCV_GPREL_S", + "R_RISCV_HI20", + "R_RISCV_JAL", + "R_RISCV_JUMP_SLOT", + "R_RISCV_LO12_I", + "R_RISCV_LO12_S", + "R_RISCV_NONE", + "R_RISCV_PCREL_HI20", + "R_RISCV_PCREL_LO12_I", + "R_RISCV_PCREL_LO12_S", + "R_RISCV_RELATIVE", + "R_RISCV_RELAX", + "R_RISCV_RVC_BRANCH", + "R_RISCV_RVC_JUMP", + "R_RISCV_RVC_LUI", + "R_RISCV_SET16", + "R_RISCV_SET32", + "R_RISCV_SET6", + "R_RISCV_SET8", + "R_RISCV_SUB16", + "R_RISCV_SUB32", + "R_RISCV_SUB6", + "R_RISCV_SUB64", + "R_RISCV_SUB8", + "R_RISCV_TLS_DTPMOD32", + "R_RISCV_TLS_DTPMOD64", + "R_RISCV_TLS_DTPREL32", + "R_RISCV_TLS_DTPREL64", + "R_RISCV_TLS_GD_HI20", + "R_RISCV_TLS_GOT_HI20", + "R_RISCV_TLS_TPREL32", + "R_RISCV_TLS_TPREL64", + "R_RISCV_TPREL_ADD", + "R_RISCV_TPREL_HI20", + "R_RISCV_TPREL_I", + "R_RISCV_TPREL_LO12_I", + "R_RISCV_TPREL_LO12_S", + "R_RISCV_TPREL_S", + "R_SPARC", + "R_SPARC_10", + "R_SPARC_11", + "R_SPARC_13", + "R_SPARC_16", + "R_SPARC_22", + "R_SPARC_32", + "R_SPARC_5", + "R_SPARC_6", + "R_SPARC_64", + "R_SPARC_7", + "R_SPARC_8", + "R_SPARC_COPY", + "R_SPARC_DISP16", + "R_SPARC_DISP32", + "R_SPARC_DISP64", + "R_SPARC_DISP8", + "R_SPARC_GLOB_DAT", + "R_SPARC_GLOB_JMP", + "R_SPARC_GOT10", + "R_SPARC_GOT13", + "R_SPARC_GOT22", + "R_SPARC_H44", + "R_SPARC_HH22", + "R_SPARC_HI22", + "R_SPARC_HIPLT22", + "R_SPARC_HIX22", + "R_SPARC_HM10", + "R_SPARC_JMP_SLOT", + "R_SPARC_L44", + "R_SPARC_LM22", + "R_SPARC_LO10", + "R_SPARC_LOPLT10", + "R_SPARC_LOX10", + "R_SPARC_M44", + "R_SPARC_NONE", + "R_SPARC_OLO10", + "R_SPARC_PC10", + "R_SPARC_PC22", + "R_SPARC_PCPLT10", + "R_SPARC_PCPLT22", + "R_SPARC_PCPLT32", + "R_SPARC_PC_HH22", + "R_SPARC_PC_HM10", + "R_SPARC_PC_LM22", + "R_SPARC_PLT32", + "R_SPARC_PLT64", + "R_SPARC_REGISTER", + "R_SPARC_RELATIVE", + "R_SPARC_UA16", + "R_SPARC_UA32", + "R_SPARC_UA64", + "R_SPARC_WDISP16", + "R_SPARC_WDISP19", + "R_SPARC_WDISP22", + "R_SPARC_WDISP30", + "R_SPARC_WPLT30", + "R_SYM32", + "R_SYM64", + "R_TYPE32", + "R_TYPE64", + "R_X86_64", + "R_X86_64_16", + "R_X86_64_32", + "R_X86_64_32S", + "R_X86_64_64", + "R_X86_64_8", + "R_X86_64_COPY", + "R_X86_64_DTPMOD64", + "R_X86_64_DTPOFF32", + "R_X86_64_DTPOFF64", + "R_X86_64_GLOB_DAT", + "R_X86_64_GOT32", + "R_X86_64_GOT64", + "R_X86_64_GOTOFF64", + "R_X86_64_GOTPC32", + "R_X86_64_GOTPC32_TLSDESC", + "R_X86_64_GOTPC64", + "R_X86_64_GOTPCREL", + "R_X86_64_GOTPCREL64", + "R_X86_64_GOTPCRELX", + "R_X86_64_GOTPLT64", + "R_X86_64_GOTTPOFF", + "R_X86_64_IRELATIVE", + "R_X86_64_JMP_SLOT", + "R_X86_64_NONE", + "R_X86_64_PC16", + "R_X86_64_PC32", + "R_X86_64_PC32_BND", + "R_X86_64_PC64", + "R_X86_64_PC8", + "R_X86_64_PLT32", + "R_X86_64_PLT32_BND", + "R_X86_64_PLTOFF64", + "R_X86_64_RELATIVE", + "R_X86_64_RELATIVE64", + "R_X86_64_REX_GOTPCRELX", + "R_X86_64_SIZE32", + "R_X86_64_SIZE64", + 
"R_X86_64_TLSDESC", + "R_X86_64_TLSDESC_CALL", + "R_X86_64_TLSGD", + "R_X86_64_TLSLD", + "R_X86_64_TPOFF32", + "R_X86_64_TPOFF64", + "Rel32", + "Rel64", + "Rela32", + "Rela64", + "SHF_ALLOC", + "SHF_COMPRESSED", + "SHF_EXECINSTR", + "SHF_GROUP", + "SHF_INFO_LINK", + "SHF_LINK_ORDER", + "SHF_MASKOS", + "SHF_MASKPROC", + "SHF_MERGE", + "SHF_OS_NONCONFORMING", + "SHF_STRINGS", + "SHF_TLS", + "SHF_WRITE", + "SHN_ABS", + "SHN_COMMON", + "SHN_HIOS", + "SHN_HIPROC", + "SHN_HIRESERVE", + "SHN_LOOS", + "SHN_LOPROC", + "SHN_LORESERVE", + "SHN_UNDEF", + "SHN_XINDEX", + "SHT_DYNAMIC", + "SHT_DYNSYM", + "SHT_FINI_ARRAY", + "SHT_GNU_ATTRIBUTES", + "SHT_GNU_HASH", + "SHT_GNU_LIBLIST", + "SHT_GNU_VERDEF", + "SHT_GNU_VERNEED", + "SHT_GNU_VERSYM", + "SHT_GROUP", + "SHT_HASH", + "SHT_HIOS", + "SHT_HIPROC", + "SHT_HIUSER", + "SHT_INIT_ARRAY", + "SHT_LOOS", + "SHT_LOPROC", + "SHT_LOUSER", + "SHT_NOBITS", + "SHT_NOTE", + "SHT_NULL", + "SHT_PREINIT_ARRAY", + "SHT_PROGBITS", + "SHT_REL", + "SHT_RELA", + "SHT_SHLIB", + "SHT_STRTAB", + "SHT_SYMTAB", + "SHT_SYMTAB_SHNDX", + "STB_GLOBAL", + "STB_HIOS", + "STB_HIPROC", + "STB_LOCAL", + "STB_LOOS", + "STB_LOPROC", + "STB_WEAK", + "STT_COMMON", + "STT_FILE", + "STT_FUNC", + "STT_HIOS", + "STT_HIPROC", + "STT_LOOS", + "STT_LOPROC", + "STT_NOTYPE", + "STT_OBJECT", + "STT_SECTION", + "STT_TLS", + "STV_DEFAULT", + "STV_HIDDEN", + "STV_INTERNAL", + "STV_PROTECTED", + "ST_BIND", + "ST_INFO", + "ST_TYPE", + "ST_VISIBILITY", + "Section", + "Section32", + "Section64", + "SectionFlag", + "SectionHeader", + "SectionIndex", + "SectionType", + "Sym32", + "Sym32Size", + "Sym64", + "Sym64Size", + "SymBind", + "SymType", + "SymVis", + "Symbol", + "Type", + "Version", + }, + "debug/gosym": []string{ + "DecodingError", + "Func", + "LineTable", + "NewLineTable", + "NewTable", + "Obj", + "Sym", + "Table", + "UnknownFileError", + "UnknownLineError", + }, + "debug/macho": []string{ + "ARM64_RELOC_ADDEND", + "ARM64_RELOC_BRANCH26", + "ARM64_RELOC_GOT_LOAD_PAGE21", + "ARM64_RELOC_GOT_LOAD_PAGEOFF12", + "ARM64_RELOC_PAGE21", + "ARM64_RELOC_PAGEOFF12", + "ARM64_RELOC_POINTER_TO_GOT", + "ARM64_RELOC_SUBTRACTOR", + "ARM64_RELOC_TLVP_LOAD_PAGE21", + "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", + "ARM64_RELOC_UNSIGNED", + "ARM_RELOC_BR24", + "ARM_RELOC_HALF", + "ARM_RELOC_HALF_SECTDIFF", + "ARM_RELOC_LOCAL_SECTDIFF", + "ARM_RELOC_PAIR", + "ARM_RELOC_PB_LA_PTR", + "ARM_RELOC_SECTDIFF", + "ARM_RELOC_VANILLA", + "ARM_THUMB_32BIT_BRANCH", + "ARM_THUMB_RELOC_BR22", + "Cpu", + "Cpu386", + "CpuAmd64", + "CpuArm", + "CpuArm64", + "CpuPpc", + "CpuPpc64", + "Dylib", + "DylibCmd", + "Dysymtab", + "DysymtabCmd", + "ErrNotFat", + "FatArch", + "FatArchHeader", + "FatFile", + "File", + "FileHeader", + "FlagAllModsBound", + "FlagAllowStackExecution", + "FlagAppExtensionSafe", + "FlagBindAtLoad", + "FlagBindsToWeak", + "FlagCanonical", + "FlagDeadStrippableDylib", + "FlagDyldLink", + "FlagForceFlat", + "FlagHasTLVDescriptors", + "FlagIncrLink", + "FlagLazyInit", + "FlagNoFixPrebinding", + "FlagNoHeapExecution", + "FlagNoMultiDefs", + "FlagNoReexportedDylibs", + "FlagNoUndefs", + "FlagPIE", + "FlagPrebindable", + "FlagPrebound", + "FlagRootSafe", + "FlagSetuidSafe", + "FlagSplitSegs", + "FlagSubsectionsViaSymbols", + "FlagTwoLevel", + "FlagWeakDefines", + "FormatError", + "GENERIC_RELOC_LOCAL_SECTDIFF", + "GENERIC_RELOC_PAIR", + "GENERIC_RELOC_PB_LA_PTR", + "GENERIC_RELOC_SECTDIFF", + "GENERIC_RELOC_TLV", + "GENERIC_RELOC_VANILLA", + "Load", + "LoadBytes", + "LoadCmd", + "LoadCmdDylib", + "LoadCmdDylinker", + 
"LoadCmdDysymtab", + "LoadCmdRpath", + "LoadCmdSegment", + "LoadCmdSegment64", + "LoadCmdSymtab", + "LoadCmdThread", + "LoadCmdUnixThread", + "Magic32", + "Magic64", + "MagicFat", + "NewFatFile", + "NewFile", + "Nlist32", + "Nlist64", + "Open", + "OpenFat", + "Regs386", + "RegsAMD64", + "Reloc", + "RelocTypeARM", + "RelocTypeARM64", + "RelocTypeGeneric", + "RelocTypeX86_64", + "Rpath", + "RpathCmd", + "Section", + "Section32", + "Section64", + "SectionHeader", + "Segment", + "Segment32", + "Segment64", + "SegmentHeader", + "Symbol", + "Symtab", + "SymtabCmd", + "Thread", + "Type", + "TypeBundle", + "TypeDylib", + "TypeExec", + "TypeObj", + "X86_64_RELOC_BRANCH", + "X86_64_RELOC_GOT", + "X86_64_RELOC_GOT_LOAD", + "X86_64_RELOC_SIGNED", + "X86_64_RELOC_SIGNED_1", + "X86_64_RELOC_SIGNED_2", + "X86_64_RELOC_SIGNED_4", + "X86_64_RELOC_SUBTRACTOR", + "X86_64_RELOC_TLV", + "X86_64_RELOC_UNSIGNED", + }, + "debug/pe": []string{ + "COFFSymbol", + "COFFSymbolSize", + "DataDirectory", + "File", + "FileHeader", + "FormatError", + "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", + "IMAGE_DIRECTORY_ENTRY_BASERELOC", + "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", + "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", + "IMAGE_DIRECTORY_ENTRY_DEBUG", + "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", + "IMAGE_DIRECTORY_ENTRY_EXCEPTION", + "IMAGE_DIRECTORY_ENTRY_EXPORT", + "IMAGE_DIRECTORY_ENTRY_GLOBALPTR", + "IMAGE_DIRECTORY_ENTRY_IAT", + "IMAGE_DIRECTORY_ENTRY_IMPORT", + "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", + "IMAGE_DIRECTORY_ENTRY_RESOURCE", + "IMAGE_DIRECTORY_ENTRY_SECURITY", + "IMAGE_DIRECTORY_ENTRY_TLS", + "IMAGE_FILE_MACHINE_AM33", + "IMAGE_FILE_MACHINE_AMD64", + "IMAGE_FILE_MACHINE_ARM", + "IMAGE_FILE_MACHINE_ARM64", + "IMAGE_FILE_MACHINE_ARMNT", + "IMAGE_FILE_MACHINE_EBC", + "IMAGE_FILE_MACHINE_I386", + "IMAGE_FILE_MACHINE_IA64", + "IMAGE_FILE_MACHINE_M32R", + "IMAGE_FILE_MACHINE_MIPS16", + "IMAGE_FILE_MACHINE_MIPSFPU", + "IMAGE_FILE_MACHINE_MIPSFPU16", + "IMAGE_FILE_MACHINE_POWERPC", + "IMAGE_FILE_MACHINE_POWERPCFP", + "IMAGE_FILE_MACHINE_R4000", + "IMAGE_FILE_MACHINE_SH3", + "IMAGE_FILE_MACHINE_SH3DSP", + "IMAGE_FILE_MACHINE_SH4", + "IMAGE_FILE_MACHINE_SH5", + "IMAGE_FILE_MACHINE_THUMB", + "IMAGE_FILE_MACHINE_UNKNOWN", + "IMAGE_FILE_MACHINE_WCEMIPSV2", + "ImportDirectory", + "NewFile", + "Open", + "OptionalHeader32", + "OptionalHeader64", + "Reloc", + "Section", + "SectionHeader", + "SectionHeader32", + "StringTable", + "Symbol", + }, + "debug/plan9obj": []string{ + "File", + "FileHeader", + "Magic386", + "Magic64", + "MagicAMD64", + "MagicARM", + "NewFile", + "Open", + "Section", + "SectionHeader", + "Sym", + }, + "encoding": []string{ + "BinaryMarshaler", + "BinaryUnmarshaler", + "TextMarshaler", + "TextUnmarshaler", + }, + "encoding/ascii85": []string{ + "CorruptInputError", + "Decode", + "Encode", + "MaxEncodedLen", + "NewDecoder", + "NewEncoder", + }, + "encoding/asn1": []string{ + "BitString", + "ClassApplication", + "ClassContextSpecific", + "ClassPrivate", + "ClassUniversal", + "Enumerated", + "Flag", + "Marshal", + "MarshalWithParams", + "NullBytes", + "NullRawValue", + "ObjectIdentifier", + "RawContent", + "RawValue", + "StructuralError", + "SyntaxError", + "TagBMPString", + "TagBitString", + "TagBoolean", + "TagEnum", + "TagGeneralString", + "TagGeneralizedTime", + "TagIA5String", + "TagInteger", + "TagNull", + "TagNumericString", + "TagOID", + "TagOctetString", + "TagPrintableString", + "TagSequence", + "TagSet", + "TagT61String", + "TagUTCTime", + "TagUTF8String", + "Unmarshal", + "UnmarshalWithParams", + }, + 
"encoding/base32": []string{ + "CorruptInputError", + "Encoding", + "HexEncoding", + "NewDecoder", + "NewEncoder", + "NewEncoding", + "NoPadding", + "StdEncoding", + "StdPadding", + }, + "encoding/base64": []string{ + "CorruptInputError", + "Encoding", + "NewDecoder", + "NewEncoder", + "NewEncoding", + "NoPadding", + "RawStdEncoding", + "RawURLEncoding", + "StdEncoding", + "StdPadding", + "URLEncoding", + }, + "encoding/binary": []string{ + "BigEndian", + "ByteOrder", + "LittleEndian", + "MaxVarintLen16", + "MaxVarintLen32", + "MaxVarintLen64", + "PutUvarint", + "PutVarint", + "Read", + "ReadUvarint", + "ReadVarint", + "Size", + "Uvarint", + "Varint", + "Write", + }, + "encoding/csv": []string{ + "ErrBareQuote", + "ErrFieldCount", + "ErrQuote", + "ErrTrailingComma", + "NewReader", + "NewWriter", + "ParseError", + "Reader", + "Writer", + }, + "encoding/gob": []string{ + "CommonType", + "Decoder", + "Encoder", + "GobDecoder", + "GobEncoder", + "NewDecoder", + "NewEncoder", + "Register", + "RegisterName", + }, + "encoding/hex": []string{ + "Decode", + "DecodeString", + "DecodedLen", + "Dump", + "Dumper", + "Encode", + "EncodeToString", + "EncodedLen", + "ErrLength", + "InvalidByteError", + "NewDecoder", + "NewEncoder", + }, + "encoding/json": []string{ + "Compact", + "Decoder", + "Delim", + "Encoder", + "HTMLEscape", + "Indent", + "InvalidUTF8Error", + "InvalidUnmarshalError", + "Marshal", + "MarshalIndent", + "Marshaler", + "MarshalerError", + "NewDecoder", + "NewEncoder", + "Number", + "RawMessage", + "SyntaxError", + "Token", + "Unmarshal", + "UnmarshalFieldError", + "UnmarshalTypeError", + "Unmarshaler", + "UnsupportedTypeError", + "UnsupportedValueError", + "Valid", + }, + "encoding/pem": []string{ + "Block", + "Decode", + "Encode", + "EncodeToMemory", + }, + "encoding/xml": []string{ + "Attr", + "CharData", + "Comment", + "CopyToken", + "Decoder", + "Directive", + "Encoder", + "EndElement", + "Escape", + "EscapeText", + "HTMLAutoClose", + "HTMLEntity", + "Header", + "Marshal", + "MarshalIndent", + "Marshaler", + "MarshalerAttr", + "Name", + "NewDecoder", + "NewEncoder", + "NewTokenDecoder", + "ProcInst", + "StartElement", + "SyntaxError", + "TagPathError", + "Token", + "TokenReader", + "Unmarshal", + "UnmarshalError", + "Unmarshaler", + "UnmarshalerAttr", + "UnsupportedTypeError", + }, + "errors": []string{ + "As", + "Is", + "New", + "Unwrap", + }, + "expvar": []string{ + "Do", + "Float", + "Func", + "Get", + "Handler", + "Int", + "KeyValue", + "Map", + "NewFloat", + "NewInt", + "NewMap", + "NewString", + "Publish", + "String", + "Var", + }, + "flag": []string{ + "Arg", + "Args", + "Bool", + "BoolVar", + "CommandLine", + "ContinueOnError", + "Duration", + "DurationVar", + "ErrHelp", + "ErrorHandling", + "ExitOnError", + "Flag", + "FlagSet", + "Float64", + "Float64Var", + "Getter", + "Int", + "Int64", + "Int64Var", + "IntVar", + "Lookup", + "NArg", + "NFlag", + "NewFlagSet", + "PanicOnError", + "Parse", + "Parsed", + "PrintDefaults", + "Set", + "String", + "StringVar", + "Uint", + "Uint64", + "Uint64Var", + "UintVar", + "UnquoteUsage", + "Usage", + "Value", + "Var", + "Visit", + "VisitAll", + }, + "fmt": []string{ + "Errorf", + "Formatter", + "Fprint", + "Fprintf", + "Fprintln", + "Fscan", + "Fscanf", + "Fscanln", + "GoStringer", + "Print", + "Printf", + "Println", + "Scan", + "ScanState", + "Scanf", + "Scanln", + "Scanner", + "Sprint", + "Sprintf", + "Sprintln", + "Sscan", + "Sscanf", + "Sscanln", + "State", + "Stringer", + }, + "go/ast": []string{ + "ArrayType", + "AssignStmt", + 
"Bad", + "BadDecl", + "BadExpr", + "BadStmt", + "BasicLit", + "BinaryExpr", + "BlockStmt", + "BranchStmt", + "CallExpr", + "CaseClause", + "ChanDir", + "ChanType", + "CommClause", + "Comment", + "CommentGroup", + "CommentMap", + "CompositeLit", + "Con", + "Decl", + "DeclStmt", + "DeferStmt", + "Ellipsis", + "EmptyStmt", + "Expr", + "ExprStmt", + "Field", + "FieldFilter", + "FieldList", + "File", + "FileExports", + "Filter", + "FilterDecl", + "FilterFile", + "FilterFuncDuplicates", + "FilterImportDuplicates", + "FilterPackage", + "FilterUnassociatedComments", + "ForStmt", + "Fprint", + "Fun", + "FuncDecl", + "FuncLit", + "FuncType", + "GenDecl", + "GoStmt", + "Ident", + "IfStmt", + "ImportSpec", + "Importer", + "IncDecStmt", + "IndexExpr", + "Inspect", + "InterfaceType", + "IsExported", + "KeyValueExpr", + "LabeledStmt", + "Lbl", + "MapType", + "MergeMode", + "MergePackageFiles", + "NewCommentMap", + "NewIdent", + "NewObj", + "NewPackage", + "NewScope", + "Node", + "NotNilFilter", + "ObjKind", + "Object", + "Package", + "PackageExports", + "ParenExpr", + "Pkg", + "Print", + "RECV", + "RangeStmt", + "ReturnStmt", + "SEND", + "Scope", + "SelectStmt", + "SelectorExpr", + "SendStmt", + "SliceExpr", + "SortImports", + "Spec", + "StarExpr", + "Stmt", + "StructType", + "SwitchStmt", + "Typ", + "TypeAssertExpr", + "TypeSpec", + "TypeSwitchStmt", + "UnaryExpr", + "ValueSpec", + "Var", + "Visitor", + "Walk", + }, + "go/build": []string{ + "AllowBinary", + "ArchChar", + "Context", + "Default", + "FindOnly", + "IgnoreVendor", + "Import", + "ImportComment", + "ImportDir", + "ImportMode", + "IsLocalImport", + "MultiplePackageError", + "NoGoError", + "Package", + "ToolDir", + }, + "go/constant": []string{ + "BinaryOp", + "BitLen", + "Bool", + "BoolVal", + "Bytes", + "Compare", + "Complex", + "Denom", + "Float", + "Float32Val", + "Float64Val", + "Imag", + "Int", + "Int64Val", + "Kind", + "Make", + "MakeBool", + "MakeFloat64", + "MakeFromBytes", + "MakeFromLiteral", + "MakeImag", + "MakeInt64", + "MakeString", + "MakeUint64", + "MakeUnknown", + "Num", + "Real", + "Shift", + "Sign", + "String", + "StringVal", + "ToComplex", + "ToFloat", + "ToInt", + "Uint64Val", + "UnaryOp", + "Unknown", + "Val", + "Value", + }, + "go/doc": []string{ + "AllDecls", + "AllMethods", + "Example", + "Examples", + "Filter", + "Func", + "IllegalPrefixes", + "IsPredeclared", + "Mode", + "New", + "NewFromFiles", + "Note", + "Package", + "PreserveAST", + "Synopsis", + "ToHTML", + "ToText", + "Type", + "Value", + }, + "go/format": []string{ + "Node", + "Source", + }, + "go/importer": []string{ + "Default", + "For", + "ForCompiler", + "Lookup", + }, + "go/parser": []string{ + "AllErrors", + "DeclarationErrors", + "ImportsOnly", + "Mode", + "PackageClauseOnly", + "ParseComments", + "ParseDir", + "ParseExpr", + "ParseExprFrom", + "ParseFile", + "SpuriousErrors", + "Trace", + }, + "go/printer": []string{ + "CommentedNode", + "Config", + "Fprint", + "Mode", + "RawFormat", + "SourcePos", + "TabIndent", + "UseSpaces", + }, + "go/scanner": []string{ + "Error", + "ErrorHandler", + "ErrorList", + "Mode", + "PrintError", + "ScanComments", + "Scanner", + }, + "go/token": []string{ + "ADD", + "ADD_ASSIGN", + "AND", + "AND_ASSIGN", + "AND_NOT", + "AND_NOT_ASSIGN", + "ARROW", + "ASSIGN", + "BREAK", + "CASE", + "CHAN", + "CHAR", + "COLON", + "COMMA", + "COMMENT", + "CONST", + "CONTINUE", + "DEC", + "DEFAULT", + "DEFER", + "DEFINE", + "ELLIPSIS", + "ELSE", + "EOF", + "EQL", + "FALLTHROUGH", + "FLOAT", + "FOR", + "FUNC", + "File", + "FileSet", + "GEQ", 
+ "GO", + "GOTO", + "GTR", + "HighestPrec", + "IDENT", + "IF", + "ILLEGAL", + "IMAG", + "IMPORT", + "INC", + "INT", + "INTERFACE", + "IsExported", + "IsIdentifier", + "IsKeyword", + "LAND", + "LBRACE", + "LBRACK", + "LEQ", + "LOR", + "LPAREN", + "LSS", + "Lookup", + "LowestPrec", + "MAP", + "MUL", + "MUL_ASSIGN", + "NEQ", + "NOT", + "NewFileSet", + "NoPos", + "OR", + "OR_ASSIGN", + "PACKAGE", + "PERIOD", + "Pos", + "Position", + "QUO", + "QUO_ASSIGN", + "RANGE", + "RBRACE", + "RBRACK", + "REM", + "REM_ASSIGN", + "RETURN", + "RPAREN", + "SELECT", + "SEMICOLON", + "SHL", + "SHL_ASSIGN", + "SHR", + "SHR_ASSIGN", + "STRING", + "STRUCT", + "SUB", + "SUB_ASSIGN", + "SWITCH", + "TYPE", + "Token", + "UnaryPrec", + "VAR", + "XOR", + "XOR_ASSIGN", + }, + "go/types": []string{ + "Array", + "AssertableTo", + "AssignableTo", + "Basic", + "BasicInfo", + "BasicKind", + "Bool", + "Builtin", + "Byte", + "Chan", + "ChanDir", + "CheckExpr", + "Checker", + "Comparable", + "Complex128", + "Complex64", + "Config", + "Const", + "ConvertibleTo", + "DefPredeclaredTestFuncs", + "Default", + "Error", + "Eval", + "ExprString", + "FieldVal", + "Float32", + "Float64", + "Func", + "Id", + "Identical", + "IdenticalIgnoreTags", + "Implements", + "ImportMode", + "Importer", + "ImporterFrom", + "Info", + "Initializer", + "Int", + "Int16", + "Int32", + "Int64", + "Int8", + "Interface", + "Invalid", + "IsBoolean", + "IsComplex", + "IsConstType", + "IsFloat", + "IsInteger", + "IsInterface", + "IsNumeric", + "IsOrdered", + "IsString", + "IsUnsigned", + "IsUntyped", + "Label", + "LookupFieldOrMethod", + "Map", + "MethodExpr", + "MethodSet", + "MethodVal", + "MissingMethod", + "Named", + "NewArray", + "NewChan", + "NewChecker", + "NewConst", + "NewField", + "NewFunc", + "NewInterface", + "NewInterfaceType", + "NewLabel", + "NewMap", + "NewMethodSet", + "NewNamed", + "NewPackage", + "NewParam", + "NewPkgName", + "NewPointer", + "NewScope", + "NewSignature", + "NewSlice", + "NewStruct", + "NewTuple", + "NewTypeName", + "NewVar", + "Nil", + "Object", + "ObjectString", + "Package", + "PkgName", + "Pointer", + "Qualifier", + "RecvOnly", + "RelativeTo", + "Rune", + "Scope", + "Selection", + "SelectionKind", + "SelectionString", + "SendOnly", + "SendRecv", + "Signature", + "Sizes", + "SizesFor", + "Slice", + "StdSizes", + "String", + "Struct", + "Tuple", + "Typ", + "Type", + "TypeAndValue", + "TypeName", + "TypeString", + "Uint", + "Uint16", + "Uint32", + "Uint64", + "Uint8", + "Uintptr", + "Universe", + "Unsafe", + "UnsafePointer", + "UntypedBool", + "UntypedComplex", + "UntypedFloat", + "UntypedInt", + "UntypedNil", + "UntypedRune", + "UntypedString", + "Var", + "WriteExpr", + "WriteSignature", + "WriteType", + }, + "hash": []string{ + "Hash", + "Hash32", + "Hash64", + }, + "hash/adler32": []string{ + "Checksum", + "New", + "Size", + }, + "hash/crc32": []string{ + "Castagnoli", + "Checksum", + "ChecksumIEEE", + "IEEE", + "IEEETable", + "Koopman", + "MakeTable", + "New", + "NewIEEE", + "Size", + "Table", + "Update", + }, + "hash/crc64": []string{ + "Checksum", + "ECMA", + "ISO", + "MakeTable", + "New", + "Size", + "Table", + "Update", + }, + "hash/fnv": []string{ + "New128", + "New128a", + "New32", + "New32a", + "New64", + "New64a", + }, + "hash/maphash": []string{ + "Hash", + "MakeSeed", + "Seed", + }, + "html": []string{ + "EscapeString", + "UnescapeString", + }, + "html/template": []string{ + "CSS", + "ErrAmbigContext", + "ErrBadHTML", + "ErrBranchEnd", + "ErrEndContext", + "ErrNoSuchTemplate", + "ErrOutputContext", + 
"ErrPartialCharset", + "ErrPartialEscape", + "ErrPredefinedEscaper", + "ErrRangeLoopReentry", + "ErrSlashAmbig", + "Error", + "ErrorCode", + "FuncMap", + "HTML", + "HTMLAttr", + "HTMLEscape", + "HTMLEscapeString", + "HTMLEscaper", + "IsTrue", + "JS", + "JSEscape", + "JSEscapeString", + "JSEscaper", + "JSStr", + "Must", + "New", + "OK", + "ParseFiles", + "ParseGlob", + "Srcset", + "Template", + "URL", + "URLQueryEscaper", + }, + "image": []string{ + "Alpha", + "Alpha16", + "Black", + "CMYK", + "Config", + "Decode", + "DecodeConfig", + "ErrFormat", + "Gray", + "Gray16", + "Image", + "NRGBA", + "NRGBA64", + "NYCbCrA", + "NewAlpha", + "NewAlpha16", + "NewCMYK", + "NewGray", + "NewGray16", + "NewNRGBA", + "NewNRGBA64", + "NewNYCbCrA", + "NewPaletted", + "NewRGBA", + "NewRGBA64", + "NewUniform", + "NewYCbCr", + "Opaque", + "Paletted", + "PalettedImage", + "Point", + "Pt", + "RGBA", + "RGBA64", + "Rect", + "Rectangle", + "RegisterFormat", + "Transparent", + "Uniform", + "White", + "YCbCr", + "YCbCrSubsampleRatio", + "YCbCrSubsampleRatio410", + "YCbCrSubsampleRatio411", + "YCbCrSubsampleRatio420", + "YCbCrSubsampleRatio422", + "YCbCrSubsampleRatio440", + "YCbCrSubsampleRatio444", + "ZP", + "ZR", + }, + "image/color": []string{ + "Alpha", + "Alpha16", + "Alpha16Model", + "AlphaModel", + "Black", + "CMYK", + "CMYKModel", + "CMYKToRGB", + "Color", + "Gray", + "Gray16", + "Gray16Model", + "GrayModel", + "Model", + "ModelFunc", + "NRGBA", + "NRGBA64", + "NRGBA64Model", + "NRGBAModel", + "NYCbCrA", + "NYCbCrAModel", + "Opaque", + "Palette", + "RGBA", + "RGBA64", + "RGBA64Model", + "RGBAModel", + "RGBToCMYK", + "RGBToYCbCr", + "Transparent", + "White", + "YCbCr", + "YCbCrModel", + "YCbCrToRGB", + }, + "image/color/palette": []string{ + "Plan9", + "WebSafe", + }, + "image/draw": []string{ + "Draw", + "DrawMask", + "Drawer", + "FloydSteinberg", + "Image", + "Op", + "Over", + "Quantizer", + "Src", + }, + "image/gif": []string{ + "Decode", + "DecodeAll", + "DecodeConfig", + "DisposalBackground", + "DisposalNone", + "DisposalPrevious", + "Encode", + "EncodeAll", + "GIF", + "Options", + }, + "image/jpeg": []string{ + "Decode", + "DecodeConfig", + "DefaultQuality", + "Encode", + "FormatError", + "Options", + "Reader", + "UnsupportedError", + }, + "image/png": []string{ + "BestCompression", + "BestSpeed", + "CompressionLevel", + "Decode", + "DecodeConfig", + "DefaultCompression", + "Encode", + "Encoder", + "EncoderBuffer", + "EncoderBufferPool", + "FormatError", + "NoCompression", + "UnsupportedError", + }, + "index/suffixarray": []string{ + "Index", + "New", + }, + "io": []string{ + "ByteReader", + "ByteScanner", + "ByteWriter", + "Closer", + "Copy", + "CopyBuffer", + "CopyN", + "EOF", + "ErrClosedPipe", + "ErrNoProgress", + "ErrShortBuffer", + "ErrShortWrite", + "ErrUnexpectedEOF", + "LimitReader", + "LimitedReader", + "MultiReader", + "MultiWriter", + "NewSectionReader", + "Pipe", + "PipeReader", + "PipeWriter", + "ReadAtLeast", + "ReadCloser", + "ReadFull", + "ReadSeeker", + "ReadWriteCloser", + "ReadWriteSeeker", + "ReadWriter", + "Reader", + "ReaderAt", + "ReaderFrom", + "RuneReader", + "RuneScanner", + "SectionReader", + "SeekCurrent", + "SeekEnd", + "SeekStart", + "Seeker", + "StringWriter", + "TeeReader", + "WriteCloser", + "WriteSeeker", + "WriteString", + "Writer", + "WriterAt", + "WriterTo", + }, + "io/ioutil": []string{ + "Discard", + "NopCloser", + "ReadAll", + "ReadDir", + "ReadFile", + "TempDir", + "TempFile", + "WriteFile", + }, + "log": []string{ + "Fatal", + "Fatalf", + "Fatalln", + "Flags", + 
"LUTC", + "Ldate", + "Llongfile", + "Lmicroseconds", + "Lmsgprefix", + "Logger", + "Lshortfile", + "LstdFlags", + "Ltime", + "New", + "Output", + "Panic", + "Panicf", + "Panicln", + "Prefix", + "Print", + "Printf", + "Println", + "SetFlags", + "SetOutput", + "SetPrefix", + "Writer", + }, + "log/syslog": []string{ + "Dial", + "LOG_ALERT", + "LOG_AUTH", + "LOG_AUTHPRIV", + "LOG_CRIT", + "LOG_CRON", + "LOG_DAEMON", + "LOG_DEBUG", + "LOG_EMERG", + "LOG_ERR", + "LOG_FTP", + "LOG_INFO", + "LOG_KERN", + "LOG_LOCAL0", + "LOG_LOCAL1", + "LOG_LOCAL2", + "LOG_LOCAL3", + "LOG_LOCAL4", + "LOG_LOCAL5", + "LOG_LOCAL6", + "LOG_LOCAL7", + "LOG_LPR", + "LOG_MAIL", + "LOG_NEWS", + "LOG_NOTICE", + "LOG_SYSLOG", + "LOG_USER", + "LOG_UUCP", + "LOG_WARNING", + "New", + "NewLogger", + "Priority", + "Writer", + }, + "math": []string{ + "Abs", + "Acos", + "Acosh", + "Asin", + "Asinh", + "Atan", + "Atan2", + "Atanh", + "Cbrt", + "Ceil", + "Copysign", + "Cos", + "Cosh", + "Dim", + "E", + "Erf", + "Erfc", + "Erfcinv", + "Erfinv", + "Exp", + "Exp2", + "Expm1", + "FMA", + "Float32bits", + "Float32frombits", + "Float64bits", + "Float64frombits", + "Floor", + "Frexp", + "Gamma", + "Hypot", + "Ilogb", + "Inf", + "IsInf", + "IsNaN", + "J0", + "J1", + "Jn", + "Ldexp", + "Lgamma", + "Ln10", + "Ln2", + "Log", + "Log10", + "Log10E", + "Log1p", + "Log2", + "Log2E", + "Logb", + "Max", + "MaxFloat32", + "MaxFloat64", + "MaxInt16", + "MaxInt32", + "MaxInt64", + "MaxInt8", + "MaxUint16", + "MaxUint32", + "MaxUint64", + "MaxUint8", + "Min", + "MinInt16", + "MinInt32", + "MinInt64", + "MinInt8", + "Mod", + "Modf", + "NaN", + "Nextafter", + "Nextafter32", + "Phi", + "Pi", + "Pow", + "Pow10", + "Remainder", + "Round", + "RoundToEven", + "Signbit", + "Sin", + "Sincos", + "Sinh", + "SmallestNonzeroFloat32", + "SmallestNonzeroFloat64", + "Sqrt", + "Sqrt2", + "SqrtE", + "SqrtPhi", + "SqrtPi", + "Tan", + "Tanh", + "Trunc", + "Y0", + "Y1", + "Yn", + }, + "math/big": []string{ + "Above", + "Accuracy", + "AwayFromZero", + "Below", + "ErrNaN", + "Exact", + "Float", + "Int", + "Jacobi", + "MaxBase", + "MaxExp", + "MaxPrec", + "MinExp", + "NewFloat", + "NewInt", + "NewRat", + "ParseFloat", + "Rat", + "RoundingMode", + "ToNearestAway", + "ToNearestEven", + "ToNegativeInf", + "ToPositiveInf", + "ToZero", + "Word", + }, + "math/bits": []string{ + "Add", + "Add32", + "Add64", + "Div", + "Div32", + "Div64", + "LeadingZeros", + "LeadingZeros16", + "LeadingZeros32", + "LeadingZeros64", + "LeadingZeros8", + "Len", + "Len16", + "Len32", + "Len64", + "Len8", + "Mul", + "Mul32", + "Mul64", + "OnesCount", + "OnesCount16", + "OnesCount32", + "OnesCount64", + "OnesCount8", + "Rem", + "Rem32", + "Rem64", + "Reverse", + "Reverse16", + "Reverse32", + "Reverse64", + "Reverse8", + "ReverseBytes", + "ReverseBytes16", + "ReverseBytes32", + "ReverseBytes64", + "RotateLeft", + "RotateLeft16", + "RotateLeft32", + "RotateLeft64", + "RotateLeft8", + "Sub", + "Sub32", + "Sub64", + "TrailingZeros", + "TrailingZeros16", + "TrailingZeros32", + "TrailingZeros64", + "TrailingZeros8", + "UintSize", + }, + "math/cmplx": []string{ + "Abs", + "Acos", + "Acosh", + "Asin", + "Asinh", + "Atan", + "Atanh", + "Conj", + "Cos", + "Cosh", + "Cot", + "Exp", + "Inf", + "IsInf", + "IsNaN", + "Log", + "Log10", + "NaN", + "Phase", + "Polar", + "Pow", + "Rect", + "Sin", + "Sinh", + "Sqrt", + "Tan", + "Tanh", + }, + "math/rand": []string{ + "ExpFloat64", + "Float32", + "Float64", + "Int", + "Int31", + "Int31n", + "Int63", + "Int63n", + "Intn", + "New", + "NewSource", + "NewZipf", + "NormFloat64", 
+ "Perm", + "Rand", + "Read", + "Seed", + "Shuffle", + "Source", + "Source64", + "Uint32", + "Uint64", + "Zipf", + }, + "mime": []string{ + "AddExtensionType", + "BEncoding", + "ErrInvalidMediaParameter", + "ExtensionsByType", + "FormatMediaType", + "ParseMediaType", + "QEncoding", + "TypeByExtension", + "WordDecoder", + "WordEncoder", + }, + "mime/multipart": []string{ + "ErrMessageTooLarge", + "File", + "FileHeader", + "Form", + "NewReader", + "NewWriter", + "Part", + "Reader", + "Writer", + }, + "mime/quotedprintable": []string{ + "NewReader", + "NewWriter", + "Reader", + "Writer", + }, + "net": []string{ + "Addr", + "AddrError", + "Buffers", + "CIDRMask", + "Conn", + "DNSConfigError", + "DNSError", + "DefaultResolver", + "Dial", + "DialIP", + "DialTCP", + "DialTimeout", + "DialUDP", + "DialUnix", + "Dialer", + "ErrWriteToConnected", + "Error", + "FileConn", + "FileListener", + "FilePacketConn", + "FlagBroadcast", + "FlagLoopback", + "FlagMulticast", + "FlagPointToPoint", + "FlagUp", + "Flags", + "HardwareAddr", + "IP", + "IPAddr", + "IPConn", + "IPMask", + "IPNet", + "IPv4", + "IPv4Mask", + "IPv4allrouter", + "IPv4allsys", + "IPv4bcast", + "IPv4len", + "IPv4zero", + "IPv6interfacelocalallnodes", + "IPv6len", + "IPv6linklocalallnodes", + "IPv6linklocalallrouters", + "IPv6loopback", + "IPv6unspecified", + "IPv6zero", + "Interface", + "InterfaceAddrs", + "InterfaceByIndex", + "InterfaceByName", + "Interfaces", + "InvalidAddrError", + "JoinHostPort", + "Listen", + "ListenConfig", + "ListenIP", + "ListenMulticastUDP", + "ListenPacket", + "ListenTCP", + "ListenUDP", + "ListenUnix", + "ListenUnixgram", + "Listener", + "LookupAddr", + "LookupCNAME", + "LookupHost", + "LookupIP", + "LookupMX", + "LookupNS", + "LookupPort", + "LookupSRV", + "LookupTXT", + "MX", + "NS", + "OpError", + "PacketConn", + "ParseCIDR", + "ParseError", + "ParseIP", + "ParseMAC", + "Pipe", + "ResolveIPAddr", + "ResolveTCPAddr", + "ResolveUDPAddr", + "ResolveUnixAddr", + "Resolver", + "SRV", + "SplitHostPort", + "TCPAddr", + "TCPConn", + "TCPListener", + "UDPAddr", + "UDPConn", + "UnixAddr", + "UnixConn", + "UnixListener", + "UnknownNetworkError", + }, + "net/http": []string{ + "CanonicalHeaderKey", + "Client", + "CloseNotifier", + "ConnState", + "Cookie", + "CookieJar", + "DefaultClient", + "DefaultMaxHeaderBytes", + "DefaultMaxIdleConnsPerHost", + "DefaultServeMux", + "DefaultTransport", + "DetectContentType", + "Dir", + "ErrAbortHandler", + "ErrBodyNotAllowed", + "ErrBodyReadAfterClose", + "ErrContentLength", + "ErrHandlerTimeout", + "ErrHeaderTooLong", + "ErrHijacked", + "ErrLineTooLong", + "ErrMissingBoundary", + "ErrMissingContentLength", + "ErrMissingFile", + "ErrNoCookie", + "ErrNoLocation", + "ErrNotMultipart", + "ErrNotSupported", + "ErrServerClosed", + "ErrShortBody", + "ErrSkipAltProtocol", + "ErrUnexpectedTrailer", + "ErrUseLastResponse", + "ErrWriteAfterFlush", + "Error", + "File", + "FileServer", + "FileSystem", + "Flusher", + "Get", + "Handle", + "HandleFunc", + "Handler", + "HandlerFunc", + "Head", + "Header", + "Hijacker", + "ListenAndServe", + "ListenAndServeTLS", + "LocalAddrContextKey", + "MaxBytesReader", + "MethodConnect", + "MethodDelete", + "MethodGet", + "MethodHead", + "MethodOptions", + "MethodPatch", + "MethodPost", + "MethodPut", + "MethodTrace", + "NewFileTransport", + "NewRequest", + "NewRequestWithContext", + "NewServeMux", + "NoBody", + "NotFound", + "NotFoundHandler", + "ParseHTTPVersion", + "ParseTime", + "Post", + "PostForm", + "ProtocolError", + "ProxyFromEnvironment", + "ProxyURL", + 
"PushOptions", + "Pusher", + "ReadRequest", + "ReadResponse", + "Redirect", + "RedirectHandler", + "Request", + "Response", + "ResponseWriter", + "RoundTripper", + "SameSite", + "SameSiteDefaultMode", + "SameSiteLaxMode", + "SameSiteNoneMode", + "SameSiteStrictMode", + "Serve", + "ServeContent", + "ServeFile", + "ServeMux", + "ServeTLS", + "Server", + "ServerContextKey", + "SetCookie", + "StateActive", + "StateClosed", + "StateHijacked", + "StateIdle", + "StateNew", + "StatusAccepted", + "StatusAlreadyReported", + "StatusBadGateway", + "StatusBadRequest", + "StatusConflict", + "StatusContinue", + "StatusCreated", + "StatusEarlyHints", + "StatusExpectationFailed", + "StatusFailedDependency", + "StatusForbidden", + "StatusFound", + "StatusGatewayTimeout", + "StatusGone", + "StatusHTTPVersionNotSupported", + "StatusIMUsed", + "StatusInsufficientStorage", + "StatusInternalServerError", + "StatusLengthRequired", + "StatusLocked", + "StatusLoopDetected", + "StatusMethodNotAllowed", + "StatusMisdirectedRequest", + "StatusMovedPermanently", + "StatusMultiStatus", + "StatusMultipleChoices", + "StatusNetworkAuthenticationRequired", + "StatusNoContent", + "StatusNonAuthoritativeInfo", + "StatusNotAcceptable", + "StatusNotExtended", + "StatusNotFound", + "StatusNotImplemented", + "StatusNotModified", + "StatusOK", + "StatusPartialContent", + "StatusPaymentRequired", + "StatusPermanentRedirect", + "StatusPreconditionFailed", + "StatusPreconditionRequired", + "StatusProcessing", + "StatusProxyAuthRequired", + "StatusRequestEntityTooLarge", + "StatusRequestHeaderFieldsTooLarge", + "StatusRequestTimeout", + "StatusRequestURITooLong", + "StatusRequestedRangeNotSatisfiable", + "StatusResetContent", + "StatusSeeOther", + "StatusServiceUnavailable", + "StatusSwitchingProtocols", + "StatusTeapot", + "StatusTemporaryRedirect", + "StatusText", + "StatusTooEarly", + "StatusTooManyRequests", + "StatusUnauthorized", + "StatusUnavailableForLegalReasons", + "StatusUnprocessableEntity", + "StatusUnsupportedMediaType", + "StatusUpgradeRequired", + "StatusUseProxy", + "StatusVariantAlsoNegotiates", + "StripPrefix", + "TimeFormat", + "TimeoutHandler", + "TrailerPrefix", + "Transport", + }, + "net/http/cgi": []string{ + "Handler", + "Request", + "RequestFromMap", + "Serve", + }, + "net/http/cookiejar": []string{ + "Jar", + "New", + "Options", + "PublicSuffixList", + }, + "net/http/fcgi": []string{ + "ErrConnClosed", + "ErrRequestAborted", + "ProcessEnv", + "Serve", + }, + "net/http/httptest": []string{ + "DefaultRemoteAddr", + "NewRecorder", + "NewRequest", + "NewServer", + "NewTLSServer", + "NewUnstartedServer", + "ResponseRecorder", + "Server", + }, + "net/http/httptrace": []string{ + "ClientTrace", + "ContextClientTrace", + "DNSDoneInfo", + "DNSStartInfo", + "GotConnInfo", + "WithClientTrace", + "WroteRequestInfo", + }, + "net/http/httputil": []string{ + "BufferPool", + "ClientConn", + "DumpRequest", + "DumpRequestOut", + "DumpResponse", + "ErrClosed", + "ErrLineTooLong", + "ErrPersistEOF", + "ErrPipeline", + "NewChunkedReader", + "NewChunkedWriter", + "NewClientConn", + "NewProxyClientConn", + "NewServerConn", + "NewSingleHostReverseProxy", + "ReverseProxy", + "ServerConn", + }, + "net/http/pprof": []string{ + "Cmdline", + "Handler", + "Index", + "Profile", + "Symbol", + "Trace", + }, + "net/mail": []string{ + "Address", + "AddressParser", + "ErrHeaderNotPresent", + "Header", + "Message", + "ParseAddress", + "ParseAddressList", + "ParseDate", + "ReadMessage", + }, + "net/rpc": []string{ + "Accept", + "Call", + 
"Client", + "ClientCodec", + "DefaultDebugPath", + "DefaultRPCPath", + "DefaultServer", + "Dial", + "DialHTTP", + "DialHTTPPath", + "ErrShutdown", + "HandleHTTP", + "NewClient", + "NewClientWithCodec", + "NewServer", + "Register", + "RegisterName", + "Request", + "Response", + "ServeCodec", + "ServeConn", + "ServeRequest", + "Server", + "ServerCodec", + "ServerError", + }, + "net/rpc/jsonrpc": []string{ + "Dial", + "NewClient", + "NewClientCodec", + "NewServerCodec", + "ServeConn", + }, + "net/smtp": []string{ + "Auth", + "CRAMMD5Auth", + "Client", + "Dial", + "NewClient", + "PlainAuth", + "SendMail", + "ServerInfo", + }, + "net/textproto": []string{ + "CanonicalMIMEHeaderKey", + "Conn", + "Dial", + "Error", + "MIMEHeader", + "NewConn", + "NewReader", + "NewWriter", + "Pipeline", + "ProtocolError", + "Reader", + "TrimBytes", + "TrimString", + "Writer", + }, + "net/url": []string{ + "Error", + "EscapeError", + "InvalidHostError", + "Parse", + "ParseQuery", + "ParseRequestURI", + "PathEscape", + "PathUnescape", + "QueryEscape", + "QueryUnescape", + "URL", + "User", + "UserPassword", + "Userinfo", + "Values", + }, + "os": []string{ + "Args", + "Chdir", + "Chmod", + "Chown", + "Chtimes", + "Clearenv", + "Create", + "DevNull", + "Environ", + "ErrClosed", + "ErrExist", + "ErrInvalid", + "ErrNoDeadline", + "ErrNotExist", + "ErrPermission", + "Executable", + "Exit", + "Expand", + "ExpandEnv", + "File", + "FileInfo", + "FileMode", + "FindProcess", + "Getegid", + "Getenv", + "Geteuid", + "Getgid", + "Getgroups", + "Getpagesize", + "Getpid", + "Getppid", + "Getuid", + "Getwd", + "Hostname", + "Interrupt", + "IsExist", + "IsNotExist", + "IsPathSeparator", + "IsPermission", + "IsTimeout", + "Kill", + "Lchown", + "Link", + "LinkError", + "LookupEnv", + "Lstat", + "Mkdir", + "MkdirAll", + "ModeAppend", + "ModeCharDevice", + "ModeDevice", + "ModeDir", + "ModeExclusive", + "ModeIrregular", + "ModeNamedPipe", + "ModePerm", + "ModeSetgid", + "ModeSetuid", + "ModeSocket", + "ModeSticky", + "ModeSymlink", + "ModeTemporary", + "ModeType", + "NewFile", + "NewSyscallError", + "O_APPEND", + "O_CREATE", + "O_EXCL", + "O_RDONLY", + "O_RDWR", + "O_SYNC", + "O_TRUNC", + "O_WRONLY", + "Open", + "OpenFile", + "PathError", + "PathListSeparator", + "PathSeparator", + "Pipe", + "ProcAttr", + "Process", + "ProcessState", + "Readlink", + "Remove", + "RemoveAll", + "Rename", + "SEEK_CUR", + "SEEK_END", + "SEEK_SET", + "SameFile", + "Setenv", + "Signal", + "StartProcess", + "Stat", + "Stderr", + "Stdin", + "Stdout", + "Symlink", + "SyscallError", + "TempDir", + "Truncate", + "Unsetenv", + "UserCacheDir", + "UserConfigDir", + "UserHomeDir", + }, + "os/exec": []string{ + "Cmd", + "Command", + "CommandContext", + "ErrNotFound", + "Error", + "ExitError", + "LookPath", + }, + "os/signal": []string{ + "Ignore", + "Ignored", + "Notify", + "Reset", + "Stop", + }, + "os/user": []string{ + "Current", + "Group", + "Lookup", + "LookupGroup", + "LookupGroupId", + "LookupId", + "UnknownGroupError", + "UnknownGroupIdError", + "UnknownUserError", + "UnknownUserIdError", + "User", + }, + "path": []string{ + "Base", + "Clean", + "Dir", + "ErrBadPattern", + "Ext", + "IsAbs", + "Join", + "Match", + "Split", + }, + "path/filepath": []string{ + "Abs", + "Base", + "Clean", + "Dir", + "ErrBadPattern", + "EvalSymlinks", + "Ext", + "FromSlash", + "Glob", + "HasPrefix", + "IsAbs", + "Join", + "ListSeparator", + "Match", + "Rel", + "Separator", + "SkipDir", + "Split", + "SplitList", + "ToSlash", + "VolumeName", + "Walk", + "WalkFunc", + }, + "plugin": 
[]string{ + "Open", + "Plugin", + "Symbol", + }, + "reflect": []string{ + "Append", + "AppendSlice", + "Array", + "ArrayOf", + "Bool", + "BothDir", + "Chan", + "ChanDir", + "ChanOf", + "Complex128", + "Complex64", + "Copy", + "DeepEqual", + "Float32", + "Float64", + "Func", + "FuncOf", + "Indirect", + "Int", + "Int16", + "Int32", + "Int64", + "Int8", + "Interface", + "Invalid", + "Kind", + "MakeChan", + "MakeFunc", + "MakeMap", + "MakeMapWithSize", + "MakeSlice", + "Map", + "MapIter", + "MapOf", + "Method", + "New", + "NewAt", + "Ptr", + "PtrTo", + "RecvDir", + "Select", + "SelectCase", + "SelectDefault", + "SelectDir", + "SelectRecv", + "SelectSend", + "SendDir", + "Slice", + "SliceHeader", + "SliceOf", + "String", + "StringHeader", + "Struct", + "StructField", + "StructOf", + "StructTag", + "Swapper", + "Type", + "TypeOf", + "Uint", + "Uint16", + "Uint32", + "Uint64", + "Uint8", + "Uintptr", + "UnsafePointer", + "Value", + "ValueError", + "ValueOf", + "Zero", + }, + "regexp": []string{ + "Compile", + "CompilePOSIX", + "Match", + "MatchReader", + "MatchString", + "MustCompile", + "MustCompilePOSIX", + "QuoteMeta", + "Regexp", + }, + "regexp/syntax": []string{ + "ClassNL", + "Compile", + "DotNL", + "EmptyBeginLine", + "EmptyBeginText", + "EmptyEndLine", + "EmptyEndText", + "EmptyNoWordBoundary", + "EmptyOp", + "EmptyOpContext", + "EmptyWordBoundary", + "ErrInternalError", + "ErrInvalidCharClass", + "ErrInvalidCharRange", + "ErrInvalidEscape", + "ErrInvalidNamedCapture", + "ErrInvalidPerlOp", + "ErrInvalidRepeatOp", + "ErrInvalidRepeatSize", + "ErrInvalidUTF8", + "ErrMissingBracket", + "ErrMissingParen", + "ErrMissingRepeatArgument", + "ErrTrailingBackslash", + "ErrUnexpectedParen", + "Error", + "ErrorCode", + "Flags", + "FoldCase", + "Inst", + "InstAlt", + "InstAltMatch", + "InstCapture", + "InstEmptyWidth", + "InstFail", + "InstMatch", + "InstNop", + "InstOp", + "InstRune", + "InstRune1", + "InstRuneAny", + "InstRuneAnyNotNL", + "IsWordChar", + "Literal", + "MatchNL", + "NonGreedy", + "OneLine", + "Op", + "OpAlternate", + "OpAnyChar", + "OpAnyCharNotNL", + "OpBeginLine", + "OpBeginText", + "OpCapture", + "OpCharClass", + "OpConcat", + "OpEmptyMatch", + "OpEndLine", + "OpEndText", + "OpLiteral", + "OpNoMatch", + "OpNoWordBoundary", + "OpPlus", + "OpQuest", + "OpRepeat", + "OpStar", + "OpWordBoundary", + "POSIX", + "Parse", + "Perl", + "PerlX", + "Prog", + "Regexp", + "Simple", + "UnicodeGroups", + "WasDollar", + }, + "runtime": []string{ + "BlockProfile", + "BlockProfileRecord", + "Breakpoint", + "CPUProfile", + "Caller", + "Callers", + "CallersFrames", + "Compiler", + "Error", + "Frame", + "Frames", + "Func", + "FuncForPC", + "GC", + "GOARCH", + "GOMAXPROCS", + "GOOS", + "GOROOT", + "Goexit", + "GoroutineProfile", + "Gosched", + "KeepAlive", + "LockOSThread", + "MemProfile", + "MemProfileRate", + "MemProfileRecord", + "MemStats", + "MutexProfile", + "NumCPU", + "NumCgoCall", + "NumGoroutine", + "ReadMemStats", + "ReadTrace", + "SetBlockProfileRate", + "SetCPUProfileRate", + "SetCgoTraceback", + "SetFinalizer", + "SetMutexProfileFraction", + "Stack", + "StackRecord", + "StartTrace", + "StopTrace", + "ThreadCreateProfile", + "TypeAssertionError", + "UnlockOSThread", + "Version", + }, + "runtime/debug": []string{ + "BuildInfo", + "FreeOSMemory", + "GCStats", + "Module", + "PrintStack", + "ReadBuildInfo", + "ReadGCStats", + "SetGCPercent", + "SetMaxStack", + "SetMaxThreads", + "SetPanicOnFault", + "SetTraceback", + "Stack", + "WriteHeapDump", + }, + "runtime/pprof": []string{ + "Do", + 
"ForLabels", + "Label", + "LabelSet", + "Labels", + "Lookup", + "NewProfile", + "Profile", + "Profiles", + "SetGoroutineLabels", + "StartCPUProfile", + "StopCPUProfile", + "WithLabels", + "WriteHeapProfile", + }, + "runtime/trace": []string{ + "IsEnabled", + "Log", + "Logf", + "NewTask", + "Region", + "Start", + "StartRegion", + "Stop", + "Task", + "WithRegion", + }, + "sort": []string{ + "Float64Slice", + "Float64s", + "Float64sAreSorted", + "IntSlice", + "Interface", + "Ints", + "IntsAreSorted", + "IsSorted", + "Reverse", + "Search", + "SearchFloat64s", + "SearchInts", + "SearchStrings", + "Slice", + "SliceIsSorted", + "SliceStable", + "Sort", + "Stable", + "StringSlice", + "Strings", + "StringsAreSorted", + }, + "strconv": []string{ + "AppendBool", + "AppendFloat", + "AppendInt", + "AppendQuote", + "AppendQuoteRune", + "AppendQuoteRuneToASCII", + "AppendQuoteRuneToGraphic", + "AppendQuoteToASCII", + "AppendQuoteToGraphic", + "AppendUint", + "Atoi", + "CanBackquote", + "ErrRange", + "ErrSyntax", + "FormatBool", + "FormatFloat", + "FormatInt", + "FormatUint", + "IntSize", + "IsGraphic", + "IsPrint", + "Itoa", + "NumError", + "ParseBool", + "ParseFloat", + "ParseInt", + "ParseUint", + "Quote", + "QuoteRune", + "QuoteRuneToASCII", + "QuoteRuneToGraphic", + "QuoteToASCII", + "QuoteToGraphic", + "Unquote", + "UnquoteChar", + }, + "strings": []string{ + "Builder", + "Compare", + "Contains", + "ContainsAny", + "ContainsRune", + "Count", + "EqualFold", + "Fields", + "FieldsFunc", + "HasPrefix", + "HasSuffix", + "Index", + "IndexAny", + "IndexByte", + "IndexFunc", + "IndexRune", + "Join", + "LastIndex", + "LastIndexAny", + "LastIndexByte", + "LastIndexFunc", + "Map", + "NewReader", + "NewReplacer", + "Reader", + "Repeat", + "Replace", + "ReplaceAll", + "Replacer", + "Split", + "SplitAfter", + "SplitAfterN", + "SplitN", + "Title", + "ToLower", + "ToLowerSpecial", + "ToTitle", + "ToTitleSpecial", + "ToUpper", + "ToUpperSpecial", + "ToValidUTF8", + "Trim", + "TrimFunc", + "TrimLeft", + "TrimLeftFunc", + "TrimPrefix", + "TrimRight", + "TrimRightFunc", + "TrimSpace", + "TrimSuffix", + }, + "sync": []string{ + "Cond", + "Locker", + "Map", + "Mutex", + "NewCond", + "Once", + "Pool", + "RWMutex", + "WaitGroup", + }, + "sync/atomic": []string{ + "AddInt32", + "AddInt64", + "AddUint32", + "AddUint64", + "AddUintptr", + "CompareAndSwapInt32", + "CompareAndSwapInt64", + "CompareAndSwapPointer", + "CompareAndSwapUint32", + "CompareAndSwapUint64", + "CompareAndSwapUintptr", + "LoadInt32", + "LoadInt64", + "LoadPointer", + "LoadUint32", + "LoadUint64", + "LoadUintptr", + "StoreInt32", + "StoreInt64", + "StorePointer", + "StoreUint32", + "StoreUint64", + "StoreUintptr", + "SwapInt32", + "SwapInt64", + "SwapPointer", + "SwapUint32", + "SwapUint64", + "SwapUintptr", + "Value", + }, + "syscall": []string{ + "AF_ALG", + "AF_APPLETALK", + "AF_ARP", + "AF_ASH", + "AF_ATM", + "AF_ATMPVC", + "AF_ATMSVC", + "AF_AX25", + "AF_BLUETOOTH", + "AF_BRIDGE", + "AF_CAIF", + "AF_CAN", + "AF_CCITT", + "AF_CHAOS", + "AF_CNT", + "AF_COIP", + "AF_DATAKIT", + "AF_DECnet", + "AF_DLI", + "AF_E164", + "AF_ECMA", + "AF_ECONET", + "AF_ENCAP", + "AF_FILE", + "AF_HYLINK", + "AF_IEEE80211", + "AF_IEEE802154", + "AF_IMPLINK", + "AF_INET", + "AF_INET6", + "AF_INET6_SDP", + "AF_INET_SDP", + "AF_IPX", + "AF_IRDA", + "AF_ISDN", + "AF_ISO", + "AF_IUCV", + "AF_KEY", + "AF_LAT", + "AF_LINK", + "AF_LLC", + "AF_LOCAL", + "AF_MAX", + "AF_MPLS", + "AF_NATM", + "AF_NDRV", + "AF_NETBEUI", + "AF_NETBIOS", + "AF_NETGRAPH", + "AF_NETLINK", + "AF_NETROM", + 
"AF_NS", + "AF_OROUTE", + "AF_OSI", + "AF_PACKET", + "AF_PHONET", + "AF_PPP", + "AF_PPPOX", + "AF_PUP", + "AF_RDS", + "AF_RESERVED_36", + "AF_ROSE", + "AF_ROUTE", + "AF_RXRPC", + "AF_SCLUSTER", + "AF_SECURITY", + "AF_SIP", + "AF_SLOW", + "AF_SNA", + "AF_SYSTEM", + "AF_TIPC", + "AF_UNIX", + "AF_UNSPEC", + "AF_VENDOR00", + "AF_VENDOR01", + "AF_VENDOR02", + "AF_VENDOR03", + "AF_VENDOR04", + "AF_VENDOR05", + "AF_VENDOR06", + "AF_VENDOR07", + "AF_VENDOR08", + "AF_VENDOR09", + "AF_VENDOR10", + "AF_VENDOR11", + "AF_VENDOR12", + "AF_VENDOR13", + "AF_VENDOR14", + "AF_VENDOR15", + "AF_VENDOR16", + "AF_VENDOR17", + "AF_VENDOR18", + "AF_VENDOR19", + "AF_VENDOR20", + "AF_VENDOR21", + "AF_VENDOR22", + "AF_VENDOR23", + "AF_VENDOR24", + "AF_VENDOR25", + "AF_VENDOR26", + "AF_VENDOR27", + "AF_VENDOR28", + "AF_VENDOR29", + "AF_VENDOR30", + "AF_VENDOR31", + "AF_VENDOR32", + "AF_VENDOR33", + "AF_VENDOR34", + "AF_VENDOR35", + "AF_VENDOR36", + "AF_VENDOR37", + "AF_VENDOR38", + "AF_VENDOR39", + "AF_VENDOR40", + "AF_VENDOR41", + "AF_VENDOR42", + "AF_VENDOR43", + "AF_VENDOR44", + "AF_VENDOR45", + "AF_VENDOR46", + "AF_VENDOR47", + "AF_WANPIPE", + "AF_X25", + "AI_CANONNAME", + "AI_NUMERICHOST", + "AI_PASSIVE", + "APPLICATION_ERROR", + "ARPHRD_ADAPT", + "ARPHRD_APPLETLK", + "ARPHRD_ARCNET", + "ARPHRD_ASH", + "ARPHRD_ATM", + "ARPHRD_AX25", + "ARPHRD_BIF", + "ARPHRD_CHAOS", + "ARPHRD_CISCO", + "ARPHRD_CSLIP", + "ARPHRD_CSLIP6", + "ARPHRD_DDCMP", + "ARPHRD_DLCI", + "ARPHRD_ECONET", + "ARPHRD_EETHER", + "ARPHRD_ETHER", + "ARPHRD_EUI64", + "ARPHRD_FCAL", + "ARPHRD_FCFABRIC", + "ARPHRD_FCPL", + "ARPHRD_FCPP", + "ARPHRD_FDDI", + "ARPHRD_FRAD", + "ARPHRD_FRELAY", + "ARPHRD_HDLC", + "ARPHRD_HIPPI", + "ARPHRD_HWX25", + "ARPHRD_IEEE1394", + "ARPHRD_IEEE802", + "ARPHRD_IEEE80211", + "ARPHRD_IEEE80211_PRISM", + "ARPHRD_IEEE80211_RADIOTAP", + "ARPHRD_IEEE802154", + "ARPHRD_IEEE802154_PHY", + "ARPHRD_IEEE802_TR", + "ARPHRD_INFINIBAND", + "ARPHRD_IPDDP", + "ARPHRD_IPGRE", + "ARPHRD_IRDA", + "ARPHRD_LAPB", + "ARPHRD_LOCALTLK", + "ARPHRD_LOOPBACK", + "ARPHRD_METRICOM", + "ARPHRD_NETROM", + "ARPHRD_NONE", + "ARPHRD_PIMREG", + "ARPHRD_PPP", + "ARPHRD_PRONET", + "ARPHRD_RAWHDLC", + "ARPHRD_ROSE", + "ARPHRD_RSRVD", + "ARPHRD_SIT", + "ARPHRD_SKIP", + "ARPHRD_SLIP", + "ARPHRD_SLIP6", + "ARPHRD_STRIP", + "ARPHRD_TUNNEL", + "ARPHRD_TUNNEL6", + "ARPHRD_VOID", + "ARPHRD_X25", + "AUTHTYPE_CLIENT", + "AUTHTYPE_SERVER", + "Accept", + "Accept4", + "AcceptEx", + "Access", + "Acct", + "AddrinfoW", + "Adjtime", + "Adjtimex", + "AttachLsf", + "B0", + "B1000000", + "B110", + "B115200", + "B1152000", + "B1200", + "B134", + "B14400", + "B150", + "B1500000", + "B1800", + "B19200", + "B200", + "B2000000", + "B230400", + "B2400", + "B2500000", + "B28800", + "B300", + "B3000000", + "B3500000", + "B38400", + "B4000000", + "B460800", + "B4800", + "B50", + "B500000", + "B57600", + "B576000", + "B600", + "B7200", + "B75", + "B76800", + "B921600", + "B9600", + "BASE_PROTOCOL", + "BIOCFEEDBACK", + "BIOCFLUSH", + "BIOCGBLEN", + "BIOCGDIRECTION", + "BIOCGDIRFILT", + "BIOCGDLT", + "BIOCGDLTLIST", + "BIOCGETBUFMODE", + "BIOCGETIF", + "BIOCGETZMAX", + "BIOCGFEEDBACK", + "BIOCGFILDROP", + "BIOCGHDRCMPLT", + "BIOCGRSIG", + "BIOCGRTIMEOUT", + "BIOCGSEESENT", + "BIOCGSTATS", + "BIOCGSTATSOLD", + "BIOCGTSTAMP", + "BIOCIMMEDIATE", + "BIOCLOCK", + "BIOCPROMISC", + "BIOCROTZBUF", + "BIOCSBLEN", + "BIOCSDIRECTION", + "BIOCSDIRFILT", + "BIOCSDLT", + "BIOCSETBUFMODE", + "BIOCSETF", + "BIOCSETFNR", + "BIOCSETIF", + "BIOCSETWF", + "BIOCSETZBUF", + "BIOCSFEEDBACK", + "BIOCSFILDROP", + 
"BIOCSHDRCMPLT", + "BIOCSRSIG", + "BIOCSRTIMEOUT", + "BIOCSSEESENT", + "BIOCSTCPF", + "BIOCSTSTAMP", + "BIOCSUDPF", + "BIOCVERSION", + "BPF_A", + "BPF_ABS", + "BPF_ADD", + "BPF_ALIGNMENT", + "BPF_ALIGNMENT32", + "BPF_ALU", + "BPF_AND", + "BPF_B", + "BPF_BUFMODE_BUFFER", + "BPF_BUFMODE_ZBUF", + "BPF_DFLTBUFSIZE", + "BPF_DIRECTION_IN", + "BPF_DIRECTION_OUT", + "BPF_DIV", + "BPF_H", + "BPF_IMM", + "BPF_IND", + "BPF_JA", + "BPF_JEQ", + "BPF_JGE", + "BPF_JGT", + "BPF_JMP", + "BPF_JSET", + "BPF_K", + "BPF_LD", + "BPF_LDX", + "BPF_LEN", + "BPF_LSH", + "BPF_MAJOR_VERSION", + "BPF_MAXBUFSIZE", + "BPF_MAXINSNS", + "BPF_MEM", + "BPF_MEMWORDS", + "BPF_MINBUFSIZE", + "BPF_MINOR_VERSION", + "BPF_MISC", + "BPF_MSH", + "BPF_MUL", + "BPF_NEG", + "BPF_OR", + "BPF_RELEASE", + "BPF_RET", + "BPF_RSH", + "BPF_ST", + "BPF_STX", + "BPF_SUB", + "BPF_TAX", + "BPF_TXA", + "BPF_T_BINTIME", + "BPF_T_BINTIME_FAST", + "BPF_T_BINTIME_MONOTONIC", + "BPF_T_BINTIME_MONOTONIC_FAST", + "BPF_T_FAST", + "BPF_T_FLAG_MASK", + "BPF_T_FORMAT_MASK", + "BPF_T_MICROTIME", + "BPF_T_MICROTIME_FAST", + "BPF_T_MICROTIME_MONOTONIC", + "BPF_T_MICROTIME_MONOTONIC_FAST", + "BPF_T_MONOTONIC", + "BPF_T_MONOTONIC_FAST", + "BPF_T_NANOTIME", + "BPF_T_NANOTIME_FAST", + "BPF_T_NANOTIME_MONOTONIC", + "BPF_T_NANOTIME_MONOTONIC_FAST", + "BPF_T_NONE", + "BPF_T_NORMAL", + "BPF_W", + "BPF_X", + "BRKINT", + "Bind", + "BindToDevice", + "BpfBuflen", + "BpfDatalink", + "BpfHdr", + "BpfHeadercmpl", + "BpfInsn", + "BpfInterface", + "BpfJump", + "BpfProgram", + "BpfStat", + "BpfStats", + "BpfStmt", + "BpfTimeout", + "BpfTimeval", + "BpfVersion", + "BpfZbuf", + "BpfZbufHeader", + "ByHandleFileInformation", + "BytePtrFromString", + "ByteSliceFromString", + "CCR0_FLUSH", + "CERT_CHAIN_POLICY_AUTHENTICODE", + "CERT_CHAIN_POLICY_AUTHENTICODE_TS", + "CERT_CHAIN_POLICY_BASE", + "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", + "CERT_CHAIN_POLICY_EV", + "CERT_CHAIN_POLICY_MICROSOFT_ROOT", + "CERT_CHAIN_POLICY_NT_AUTH", + "CERT_CHAIN_POLICY_SSL", + "CERT_E_CN_NO_MATCH", + "CERT_E_EXPIRED", + "CERT_E_PURPOSE", + "CERT_E_ROLE", + "CERT_E_UNTRUSTEDROOT", + "CERT_STORE_ADD_ALWAYS", + "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", + "CERT_STORE_PROV_MEMORY", + "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", + "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", + "CERT_TRUST_INVALID_BASIC_CONSTRAINTS", + "CERT_TRUST_INVALID_EXTENSION", + "CERT_TRUST_INVALID_NAME_CONSTRAINTS", + "CERT_TRUST_INVALID_POLICY_CONSTRAINTS", + "CERT_TRUST_IS_CYCLIC", + "CERT_TRUST_IS_EXPLICIT_DISTRUST", + "CERT_TRUST_IS_NOT_SIGNATURE_VALID", + "CERT_TRUST_IS_NOT_TIME_VALID", + "CERT_TRUST_IS_NOT_VALID_FOR_USAGE", + "CERT_TRUST_IS_OFFLINE_REVOCATION", + "CERT_TRUST_IS_REVOKED", + "CERT_TRUST_IS_UNTRUSTED_ROOT", + "CERT_TRUST_NO_ERROR", + "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", + "CERT_TRUST_REVOCATION_STATUS_UNKNOWN", + "CFLUSH", + "CLOCAL", + "CLONE_CHILD_CLEARTID", + "CLONE_CHILD_SETTID", + "CLONE_CSIGNAL", + "CLONE_DETACHED", + "CLONE_FILES", + "CLONE_FS", + "CLONE_IO", + "CLONE_NEWIPC", + "CLONE_NEWNET", + "CLONE_NEWNS", + "CLONE_NEWPID", + "CLONE_NEWUSER", + "CLONE_NEWUTS", + "CLONE_PARENT", + "CLONE_PARENT_SETTID", + "CLONE_PID", + "CLONE_PTRACE", + "CLONE_SETTLS", + "CLONE_SIGHAND", + "CLONE_SYSVSEM", + "CLONE_THREAD", + "CLONE_UNTRACED", + "CLONE_VFORK", + "CLONE_VM", + "CPUID_CFLUSH", + "CREAD", + "CREATE_ALWAYS", + "CREATE_NEW", + "CREATE_NEW_PROCESS_GROUP", + 
"CREATE_UNICODE_ENVIRONMENT", + "CRYPT_DEFAULT_CONTAINER_OPTIONAL", + "CRYPT_DELETEKEYSET", + "CRYPT_MACHINE_KEYSET", + "CRYPT_NEWKEYSET", + "CRYPT_SILENT", + "CRYPT_VERIFYCONTEXT", + "CS5", + "CS6", + "CS7", + "CS8", + "CSIZE", + "CSTART", + "CSTATUS", + "CSTOP", + "CSTOPB", + "CSUSP", + "CTL_MAXNAME", + "CTL_NET", + "CTL_QUERY", + "CTRL_BREAK_EVENT", + "CTRL_CLOSE_EVENT", + "CTRL_C_EVENT", + "CTRL_LOGOFF_EVENT", + "CTRL_SHUTDOWN_EVENT", + "CancelIo", + "CancelIoEx", + "CertAddCertificateContextToStore", + "CertChainContext", + "CertChainElement", + "CertChainPara", + "CertChainPolicyPara", + "CertChainPolicyStatus", + "CertCloseStore", + "CertContext", + "CertCreateCertificateContext", + "CertEnhKeyUsage", + "CertEnumCertificatesInStore", + "CertFreeCertificateChain", + "CertFreeCertificateContext", + "CertGetCertificateChain", + "CertInfo", + "CertOpenStore", + "CertOpenSystemStore", + "CertRevocationCrlInfo", + "CertRevocationInfo", + "CertSimpleChain", + "CertTrustListInfo", + "CertTrustStatus", + "CertUsageMatch", + "CertVerifyCertificateChainPolicy", + "Chdir", + "CheckBpfVersion", + "Chflags", + "Chmod", + "Chown", + "Chroot", + "Clearenv", + "Close", + "CloseHandle", + "CloseOnExec", + "Closesocket", + "CmsgLen", + "CmsgSpace", + "Cmsghdr", + "CommandLineToArgv", + "ComputerName", + "Conn", + "Connect", + "ConnectEx", + "ConvertSidToStringSid", + "ConvertStringSidToSid", + "CopySid", + "Creat", + "CreateDirectory", + "CreateFile", + "CreateFileMapping", + "CreateHardLink", + "CreateIoCompletionPort", + "CreatePipe", + "CreateProcess", + "CreateProcessAsUser", + "CreateSymbolicLink", + "CreateToolhelp32Snapshot", + "Credential", + "CryptAcquireContext", + "CryptGenRandom", + "CryptReleaseContext", + "DIOCBSFLUSH", + "DIOCOSFPFLUSH", + "DLL", + "DLLError", + "DLT_A429", + "DLT_A653_ICM", + "DLT_AIRONET_HEADER", + "DLT_AOS", + "DLT_APPLE_IP_OVER_IEEE1394", + "DLT_ARCNET", + "DLT_ARCNET_LINUX", + "DLT_ATM_CLIP", + "DLT_ATM_RFC1483", + "DLT_AURORA", + "DLT_AX25", + "DLT_AX25_KISS", + "DLT_BACNET_MS_TP", + "DLT_BLUETOOTH_HCI_H4", + "DLT_BLUETOOTH_HCI_H4_WITH_PHDR", + "DLT_CAN20B", + "DLT_CAN_SOCKETCAN", + "DLT_CHAOS", + "DLT_CHDLC", + "DLT_CISCO_IOS", + "DLT_C_HDLC", + "DLT_C_HDLC_WITH_DIR", + "DLT_DBUS", + "DLT_DECT", + "DLT_DOCSIS", + "DLT_DVB_CI", + "DLT_ECONET", + "DLT_EN10MB", + "DLT_EN3MB", + "DLT_ENC", + "DLT_ERF", + "DLT_ERF_ETH", + "DLT_ERF_POS", + "DLT_FC_2", + "DLT_FC_2_WITH_FRAME_DELIMS", + "DLT_FDDI", + "DLT_FLEXRAY", + "DLT_FRELAY", + "DLT_FRELAY_WITH_DIR", + "DLT_GCOM_SERIAL", + "DLT_GCOM_T1E1", + "DLT_GPF_F", + "DLT_GPF_T", + "DLT_GPRS_LLC", + "DLT_GSMTAP_ABIS", + "DLT_GSMTAP_UM", + "DLT_HDLC", + "DLT_HHDLC", + "DLT_HIPPI", + "DLT_IBM_SN", + "DLT_IBM_SP", + "DLT_IEEE802", + "DLT_IEEE802_11", + "DLT_IEEE802_11_RADIO", + "DLT_IEEE802_11_RADIO_AVS", + "DLT_IEEE802_15_4", + "DLT_IEEE802_15_4_LINUX", + "DLT_IEEE802_15_4_NOFCS", + "DLT_IEEE802_15_4_NONASK_PHY", + "DLT_IEEE802_16_MAC_CPS", + "DLT_IEEE802_16_MAC_CPS_RADIO", + "DLT_IPFILTER", + "DLT_IPMB", + "DLT_IPMB_LINUX", + "DLT_IPNET", + "DLT_IPOIB", + "DLT_IPV4", + "DLT_IPV6", + "DLT_IP_OVER_FC", + "DLT_JUNIPER_ATM1", + "DLT_JUNIPER_ATM2", + "DLT_JUNIPER_ATM_CEMIC", + "DLT_JUNIPER_CHDLC", + "DLT_JUNIPER_ES", + "DLT_JUNIPER_ETHER", + "DLT_JUNIPER_FIBRECHANNEL", + "DLT_JUNIPER_FRELAY", + "DLT_JUNIPER_GGSN", + "DLT_JUNIPER_ISM", + "DLT_JUNIPER_MFR", + "DLT_JUNIPER_MLFR", + "DLT_JUNIPER_MLPPP", + "DLT_JUNIPER_MONITOR", + "DLT_JUNIPER_PIC_PEER", + "DLT_JUNIPER_PPP", + "DLT_JUNIPER_PPPOE", + "DLT_JUNIPER_PPPOE_ATM", + 
"DLT_JUNIPER_SERVICES", + "DLT_JUNIPER_SRX_E2E", + "DLT_JUNIPER_ST", + "DLT_JUNIPER_VP", + "DLT_JUNIPER_VS", + "DLT_LAPB_WITH_DIR", + "DLT_LAPD", + "DLT_LIN", + "DLT_LINUX_EVDEV", + "DLT_LINUX_IRDA", + "DLT_LINUX_LAPD", + "DLT_LINUX_PPP_WITHDIRECTION", + "DLT_LINUX_SLL", + "DLT_LOOP", + "DLT_LTALK", + "DLT_MATCHING_MAX", + "DLT_MATCHING_MIN", + "DLT_MFR", + "DLT_MOST", + "DLT_MPEG_2_TS", + "DLT_MPLS", + "DLT_MTP2", + "DLT_MTP2_WITH_PHDR", + "DLT_MTP3", + "DLT_MUX27010", + "DLT_NETANALYZER", + "DLT_NETANALYZER_TRANSPARENT", + "DLT_NFC_LLCP", + "DLT_NFLOG", + "DLT_NG40", + "DLT_NULL", + "DLT_PCI_EXP", + "DLT_PFLOG", + "DLT_PFSYNC", + "DLT_PPI", + "DLT_PPP", + "DLT_PPP_BSDOS", + "DLT_PPP_ETHER", + "DLT_PPP_PPPD", + "DLT_PPP_SERIAL", + "DLT_PPP_WITH_DIR", + "DLT_PPP_WITH_DIRECTION", + "DLT_PRISM_HEADER", + "DLT_PRONET", + "DLT_RAIF1", + "DLT_RAW", + "DLT_RAWAF_MASK", + "DLT_RIO", + "DLT_SCCP", + "DLT_SITA", + "DLT_SLIP", + "DLT_SLIP_BSDOS", + "DLT_STANAG_5066_D_PDU", + "DLT_SUNATM", + "DLT_SYMANTEC_FIREWALL", + "DLT_TZSP", + "DLT_USB", + "DLT_USB_LINUX", + "DLT_USB_LINUX_MMAPPED", + "DLT_USER0", + "DLT_USER1", + "DLT_USER10", + "DLT_USER11", + "DLT_USER12", + "DLT_USER13", + "DLT_USER14", + "DLT_USER15", + "DLT_USER2", + "DLT_USER3", + "DLT_USER4", + "DLT_USER5", + "DLT_USER6", + "DLT_USER7", + "DLT_USER8", + "DLT_USER9", + "DLT_WIHART", + "DLT_X2E_SERIAL", + "DLT_X2E_XORAYA", + "DNSMXData", + "DNSPTRData", + "DNSRecord", + "DNSSRVData", + "DNSTXTData", + "DNS_INFO_NO_RECORDS", + "DNS_TYPE_A", + "DNS_TYPE_A6", + "DNS_TYPE_AAAA", + "DNS_TYPE_ADDRS", + "DNS_TYPE_AFSDB", + "DNS_TYPE_ALL", + "DNS_TYPE_ANY", + "DNS_TYPE_ATMA", + "DNS_TYPE_AXFR", + "DNS_TYPE_CERT", + "DNS_TYPE_CNAME", + "DNS_TYPE_DHCID", + "DNS_TYPE_DNAME", + "DNS_TYPE_DNSKEY", + "DNS_TYPE_DS", + "DNS_TYPE_EID", + "DNS_TYPE_GID", + "DNS_TYPE_GPOS", + "DNS_TYPE_HINFO", + "DNS_TYPE_ISDN", + "DNS_TYPE_IXFR", + "DNS_TYPE_KEY", + "DNS_TYPE_KX", + "DNS_TYPE_LOC", + "DNS_TYPE_MAILA", + "DNS_TYPE_MAILB", + "DNS_TYPE_MB", + "DNS_TYPE_MD", + "DNS_TYPE_MF", + "DNS_TYPE_MG", + "DNS_TYPE_MINFO", + "DNS_TYPE_MR", + "DNS_TYPE_MX", + "DNS_TYPE_NAPTR", + "DNS_TYPE_NBSTAT", + "DNS_TYPE_NIMLOC", + "DNS_TYPE_NS", + "DNS_TYPE_NSAP", + "DNS_TYPE_NSAPPTR", + "DNS_TYPE_NSEC", + "DNS_TYPE_NULL", + "DNS_TYPE_NXT", + "DNS_TYPE_OPT", + "DNS_TYPE_PTR", + "DNS_TYPE_PX", + "DNS_TYPE_RP", + "DNS_TYPE_RRSIG", + "DNS_TYPE_RT", + "DNS_TYPE_SIG", + "DNS_TYPE_SINK", + "DNS_TYPE_SOA", + "DNS_TYPE_SRV", + "DNS_TYPE_TEXT", + "DNS_TYPE_TKEY", + "DNS_TYPE_TSIG", + "DNS_TYPE_UID", + "DNS_TYPE_UINFO", + "DNS_TYPE_UNSPEC", + "DNS_TYPE_WINS", + "DNS_TYPE_WINSR", + "DNS_TYPE_WKS", + "DNS_TYPE_X25", + "DT_BLK", + "DT_CHR", + "DT_DIR", + "DT_FIFO", + "DT_LNK", + "DT_REG", + "DT_SOCK", + "DT_UNKNOWN", + "DT_WHT", + "DUPLICATE_CLOSE_SOURCE", + "DUPLICATE_SAME_ACCESS", + "DeleteFile", + "DetachLsf", + "DeviceIoControl", + "Dirent", + "DnsNameCompare", + "DnsQuery", + "DnsRecordListFree", + "DnsSectionAdditional", + "DnsSectionAnswer", + "DnsSectionAuthority", + "DnsSectionQuestion", + "Dup", + "Dup2", + "Dup3", + "DuplicateHandle", + "E2BIG", + "EACCES", + "EADDRINUSE", + "EADDRNOTAVAIL", + "EADV", + "EAFNOSUPPORT", + "EAGAIN", + "EALREADY", + "EAUTH", + "EBADARCH", + "EBADE", + "EBADEXEC", + "EBADF", + "EBADFD", + "EBADMACHO", + "EBADMSG", + "EBADR", + "EBADRPC", + "EBADRQC", + "EBADSLT", + "EBFONT", + "EBUSY", + "ECANCELED", + "ECAPMODE", + "ECHILD", + "ECHO", + "ECHOCTL", + "ECHOE", + "ECHOK", + "ECHOKE", + "ECHONL", + "ECHOPRT", + "ECHRNG", + "ECOMM", + "ECONNABORTED", + 
"ECONNREFUSED", + "ECONNRESET", + "EDEADLK", + "EDEADLOCK", + "EDESTADDRREQ", + "EDEVERR", + "EDOM", + "EDOOFUS", + "EDOTDOT", + "EDQUOT", + "EEXIST", + "EFAULT", + "EFBIG", + "EFER_LMA", + "EFER_LME", + "EFER_NXE", + "EFER_SCE", + "EFTYPE", + "EHOSTDOWN", + "EHOSTUNREACH", + "EHWPOISON", + "EIDRM", + "EILSEQ", + "EINPROGRESS", + "EINTR", + "EINVAL", + "EIO", + "EIPSEC", + "EISCONN", + "EISDIR", + "EISNAM", + "EKEYEXPIRED", + "EKEYREJECTED", + "EKEYREVOKED", + "EL2HLT", + "EL2NSYNC", + "EL3HLT", + "EL3RST", + "ELAST", + "ELF_NGREG", + "ELF_PRARGSZ", + "ELIBACC", + "ELIBBAD", + "ELIBEXEC", + "ELIBMAX", + "ELIBSCN", + "ELNRNG", + "ELOOP", + "EMEDIUMTYPE", + "EMFILE", + "EMLINK", + "EMSGSIZE", + "EMT_TAGOVF", + "EMULTIHOP", + "EMUL_ENABLED", + "EMUL_LINUX", + "EMUL_LINUX32", + "EMUL_MAXID", + "EMUL_NATIVE", + "ENAMETOOLONG", + "ENAVAIL", + "ENDRUNDISC", + "ENEEDAUTH", + "ENETDOWN", + "ENETRESET", + "ENETUNREACH", + "ENFILE", + "ENOANO", + "ENOATTR", + "ENOBUFS", + "ENOCSI", + "ENODATA", + "ENODEV", + "ENOENT", + "ENOEXEC", + "ENOKEY", + "ENOLCK", + "ENOLINK", + "ENOMEDIUM", + "ENOMEM", + "ENOMSG", + "ENONET", + "ENOPKG", + "ENOPOLICY", + "ENOPROTOOPT", + "ENOSPC", + "ENOSR", + "ENOSTR", + "ENOSYS", + "ENOTBLK", + "ENOTCAPABLE", + "ENOTCONN", + "ENOTDIR", + "ENOTEMPTY", + "ENOTNAM", + "ENOTRECOVERABLE", + "ENOTSOCK", + "ENOTSUP", + "ENOTTY", + "ENOTUNIQ", + "ENXIO", + "EN_SW_CTL_INF", + "EN_SW_CTL_PREC", + "EN_SW_CTL_ROUND", + "EN_SW_DATACHAIN", + "EN_SW_DENORM", + "EN_SW_INVOP", + "EN_SW_OVERFLOW", + "EN_SW_PRECLOSS", + "EN_SW_UNDERFLOW", + "EN_SW_ZERODIV", + "EOPNOTSUPP", + "EOVERFLOW", + "EOWNERDEAD", + "EPERM", + "EPFNOSUPPORT", + "EPIPE", + "EPOLLERR", + "EPOLLET", + "EPOLLHUP", + "EPOLLIN", + "EPOLLMSG", + "EPOLLONESHOT", + "EPOLLOUT", + "EPOLLPRI", + "EPOLLRDBAND", + "EPOLLRDHUP", + "EPOLLRDNORM", + "EPOLLWRBAND", + "EPOLLWRNORM", + "EPOLL_CLOEXEC", + "EPOLL_CTL_ADD", + "EPOLL_CTL_DEL", + "EPOLL_CTL_MOD", + "EPOLL_NONBLOCK", + "EPROCLIM", + "EPROCUNAVAIL", + "EPROGMISMATCH", + "EPROGUNAVAIL", + "EPROTO", + "EPROTONOSUPPORT", + "EPROTOTYPE", + "EPWROFF", + "ERANGE", + "EREMCHG", + "EREMOTE", + "EREMOTEIO", + "ERESTART", + "ERFKILL", + "EROFS", + "ERPCMISMATCH", + "ERROR_ACCESS_DENIED", + "ERROR_ALREADY_EXISTS", + "ERROR_BROKEN_PIPE", + "ERROR_BUFFER_OVERFLOW", + "ERROR_DIR_NOT_EMPTY", + "ERROR_ENVVAR_NOT_FOUND", + "ERROR_FILE_EXISTS", + "ERROR_FILE_NOT_FOUND", + "ERROR_HANDLE_EOF", + "ERROR_INSUFFICIENT_BUFFER", + "ERROR_IO_PENDING", + "ERROR_MOD_NOT_FOUND", + "ERROR_MORE_DATA", + "ERROR_NETNAME_DELETED", + "ERROR_NOT_FOUND", + "ERROR_NO_MORE_FILES", + "ERROR_OPERATION_ABORTED", + "ERROR_PATH_NOT_FOUND", + "ERROR_PRIVILEGE_NOT_HELD", + "ERROR_PROC_NOT_FOUND", + "ESHLIBVERS", + "ESHUTDOWN", + "ESOCKTNOSUPPORT", + "ESPIPE", + "ESRCH", + "ESRMNT", + "ESTALE", + "ESTRPIPE", + "ETHERCAP_JUMBO_MTU", + "ETHERCAP_VLAN_HWTAGGING", + "ETHERCAP_VLAN_MTU", + "ETHERMIN", + "ETHERMTU", + "ETHERMTU_JUMBO", + "ETHERTYPE_8023", + "ETHERTYPE_AARP", + "ETHERTYPE_ACCTON", + "ETHERTYPE_AEONIC", + "ETHERTYPE_ALPHA", + "ETHERTYPE_AMBER", + "ETHERTYPE_AMOEBA", + "ETHERTYPE_AOE", + "ETHERTYPE_APOLLO", + "ETHERTYPE_APOLLODOMAIN", + "ETHERTYPE_APPLETALK", + "ETHERTYPE_APPLITEK", + "ETHERTYPE_ARGONAUT", + "ETHERTYPE_ARP", + "ETHERTYPE_AT", + "ETHERTYPE_ATALK", + "ETHERTYPE_ATOMIC", + "ETHERTYPE_ATT", + "ETHERTYPE_ATTSTANFORD", + "ETHERTYPE_AUTOPHON", + "ETHERTYPE_AXIS", + "ETHERTYPE_BCLOOP", + "ETHERTYPE_BOFL", + "ETHERTYPE_CABLETRON", + "ETHERTYPE_CHAOS", + "ETHERTYPE_COMDESIGN", + "ETHERTYPE_COMPUGRAPHIC", + 
"ETHERTYPE_COUNTERPOINT", + "ETHERTYPE_CRONUS", + "ETHERTYPE_CRONUSVLN", + "ETHERTYPE_DCA", + "ETHERTYPE_DDE", + "ETHERTYPE_DEBNI", + "ETHERTYPE_DECAM", + "ETHERTYPE_DECCUST", + "ETHERTYPE_DECDIAG", + "ETHERTYPE_DECDNS", + "ETHERTYPE_DECDTS", + "ETHERTYPE_DECEXPER", + "ETHERTYPE_DECLAST", + "ETHERTYPE_DECLTM", + "ETHERTYPE_DECMUMPS", + "ETHERTYPE_DECNETBIOS", + "ETHERTYPE_DELTACON", + "ETHERTYPE_DIDDLE", + "ETHERTYPE_DLOG1", + "ETHERTYPE_DLOG2", + "ETHERTYPE_DN", + "ETHERTYPE_DOGFIGHT", + "ETHERTYPE_DSMD", + "ETHERTYPE_ECMA", + "ETHERTYPE_ENCRYPT", + "ETHERTYPE_ES", + "ETHERTYPE_EXCELAN", + "ETHERTYPE_EXPERDATA", + "ETHERTYPE_FLIP", + "ETHERTYPE_FLOWCONTROL", + "ETHERTYPE_FRARP", + "ETHERTYPE_GENDYN", + "ETHERTYPE_HAYES", + "ETHERTYPE_HIPPI_FP", + "ETHERTYPE_HITACHI", + "ETHERTYPE_HP", + "ETHERTYPE_IEEEPUP", + "ETHERTYPE_IEEEPUPAT", + "ETHERTYPE_IMLBL", + "ETHERTYPE_IMLBLDIAG", + "ETHERTYPE_IP", + "ETHERTYPE_IPAS", + "ETHERTYPE_IPV6", + "ETHERTYPE_IPX", + "ETHERTYPE_IPXNEW", + "ETHERTYPE_KALPANA", + "ETHERTYPE_LANBRIDGE", + "ETHERTYPE_LANPROBE", + "ETHERTYPE_LAT", + "ETHERTYPE_LBACK", + "ETHERTYPE_LITTLE", + "ETHERTYPE_LLDP", + "ETHERTYPE_LOGICRAFT", + "ETHERTYPE_LOOPBACK", + "ETHERTYPE_MATRA", + "ETHERTYPE_MAX", + "ETHERTYPE_MERIT", + "ETHERTYPE_MICP", + "ETHERTYPE_MOPDL", + "ETHERTYPE_MOPRC", + "ETHERTYPE_MOTOROLA", + "ETHERTYPE_MPLS", + "ETHERTYPE_MPLS_MCAST", + "ETHERTYPE_MUMPS", + "ETHERTYPE_NBPCC", + "ETHERTYPE_NBPCLAIM", + "ETHERTYPE_NBPCLREQ", + "ETHERTYPE_NBPCLRSP", + "ETHERTYPE_NBPCREQ", + "ETHERTYPE_NBPCRSP", + "ETHERTYPE_NBPDG", + "ETHERTYPE_NBPDGB", + "ETHERTYPE_NBPDLTE", + "ETHERTYPE_NBPRAR", + "ETHERTYPE_NBPRAS", + "ETHERTYPE_NBPRST", + "ETHERTYPE_NBPSCD", + "ETHERTYPE_NBPVCD", + "ETHERTYPE_NBS", + "ETHERTYPE_NCD", + "ETHERTYPE_NESTAR", + "ETHERTYPE_NETBEUI", + "ETHERTYPE_NOVELL", + "ETHERTYPE_NS", + "ETHERTYPE_NSAT", + "ETHERTYPE_NSCOMPAT", + "ETHERTYPE_NTRAILER", + "ETHERTYPE_OS9", + "ETHERTYPE_OS9NET", + "ETHERTYPE_PACER", + "ETHERTYPE_PAE", + "ETHERTYPE_PCS", + "ETHERTYPE_PLANNING", + "ETHERTYPE_PPP", + "ETHERTYPE_PPPOE", + "ETHERTYPE_PPPOEDISC", + "ETHERTYPE_PRIMENTS", + "ETHERTYPE_PUP", + "ETHERTYPE_PUPAT", + "ETHERTYPE_QINQ", + "ETHERTYPE_RACAL", + "ETHERTYPE_RATIONAL", + "ETHERTYPE_RAWFR", + "ETHERTYPE_RCL", + "ETHERTYPE_RDP", + "ETHERTYPE_RETIX", + "ETHERTYPE_REVARP", + "ETHERTYPE_SCA", + "ETHERTYPE_SECTRA", + "ETHERTYPE_SECUREDATA", + "ETHERTYPE_SGITW", + "ETHERTYPE_SG_BOUNCE", + "ETHERTYPE_SG_DIAG", + "ETHERTYPE_SG_NETGAMES", + "ETHERTYPE_SG_RESV", + "ETHERTYPE_SIMNET", + "ETHERTYPE_SLOW", + "ETHERTYPE_SLOWPROTOCOLS", + "ETHERTYPE_SNA", + "ETHERTYPE_SNMP", + "ETHERTYPE_SONIX", + "ETHERTYPE_SPIDER", + "ETHERTYPE_SPRITE", + "ETHERTYPE_STP", + "ETHERTYPE_TALARIS", + "ETHERTYPE_TALARISMC", + "ETHERTYPE_TCPCOMP", + "ETHERTYPE_TCPSM", + "ETHERTYPE_TEC", + "ETHERTYPE_TIGAN", + "ETHERTYPE_TRAIL", + "ETHERTYPE_TRANSETHER", + "ETHERTYPE_TYMSHARE", + "ETHERTYPE_UBBST", + "ETHERTYPE_UBDEBUG", + "ETHERTYPE_UBDIAGLOOP", + "ETHERTYPE_UBDL", + "ETHERTYPE_UBNIU", + "ETHERTYPE_UBNMC", + "ETHERTYPE_VALID", + "ETHERTYPE_VARIAN", + "ETHERTYPE_VAXELN", + "ETHERTYPE_VEECO", + "ETHERTYPE_VEXP", + "ETHERTYPE_VGLAB", + "ETHERTYPE_VINES", + "ETHERTYPE_VINESECHO", + "ETHERTYPE_VINESLOOP", + "ETHERTYPE_VITAL", + "ETHERTYPE_VLAN", + "ETHERTYPE_VLTLMAN", + "ETHERTYPE_VPROD", + "ETHERTYPE_VURESERVED", + "ETHERTYPE_WATERLOO", + "ETHERTYPE_WELLFLEET", + "ETHERTYPE_X25", + "ETHERTYPE_X75", + "ETHERTYPE_XNSSM", + "ETHERTYPE_XTP", + "ETHER_ADDR_LEN", + "ETHER_ALIGN", + "ETHER_CRC_LEN", + 
"ETHER_CRC_POLY_BE", + "ETHER_CRC_POLY_LE", + "ETHER_HDR_LEN", + "ETHER_MAX_DIX_LEN", + "ETHER_MAX_LEN", + "ETHER_MAX_LEN_JUMBO", + "ETHER_MIN_LEN", + "ETHER_PPPOE_ENCAP_LEN", + "ETHER_TYPE_LEN", + "ETHER_VLAN_ENCAP_LEN", + "ETH_P_1588", + "ETH_P_8021Q", + "ETH_P_802_2", + "ETH_P_802_3", + "ETH_P_AARP", + "ETH_P_ALL", + "ETH_P_AOE", + "ETH_P_ARCNET", + "ETH_P_ARP", + "ETH_P_ATALK", + "ETH_P_ATMFATE", + "ETH_P_ATMMPOA", + "ETH_P_AX25", + "ETH_P_BPQ", + "ETH_P_CAIF", + "ETH_P_CAN", + "ETH_P_CONTROL", + "ETH_P_CUST", + "ETH_P_DDCMP", + "ETH_P_DEC", + "ETH_P_DIAG", + "ETH_P_DNA_DL", + "ETH_P_DNA_RC", + "ETH_P_DNA_RT", + "ETH_P_DSA", + "ETH_P_ECONET", + "ETH_P_EDSA", + "ETH_P_FCOE", + "ETH_P_FIP", + "ETH_P_HDLC", + "ETH_P_IEEE802154", + "ETH_P_IEEEPUP", + "ETH_P_IEEEPUPAT", + "ETH_P_IP", + "ETH_P_IPV6", + "ETH_P_IPX", + "ETH_P_IRDA", + "ETH_P_LAT", + "ETH_P_LINK_CTL", + "ETH_P_LOCALTALK", + "ETH_P_LOOP", + "ETH_P_MOBITEX", + "ETH_P_MPLS_MC", + "ETH_P_MPLS_UC", + "ETH_P_PAE", + "ETH_P_PAUSE", + "ETH_P_PHONET", + "ETH_P_PPPTALK", + "ETH_P_PPP_DISC", + "ETH_P_PPP_MP", + "ETH_P_PPP_SES", + "ETH_P_PUP", + "ETH_P_PUPAT", + "ETH_P_RARP", + "ETH_P_SCA", + "ETH_P_SLOW", + "ETH_P_SNAP", + "ETH_P_TEB", + "ETH_P_TIPC", + "ETH_P_TRAILER", + "ETH_P_TR_802_2", + "ETH_P_WAN_PPP", + "ETH_P_WCCP", + "ETH_P_X25", + "ETIME", + "ETIMEDOUT", + "ETOOMANYREFS", + "ETXTBSY", + "EUCLEAN", + "EUNATCH", + "EUSERS", + "EVFILT_AIO", + "EVFILT_FS", + "EVFILT_LIO", + "EVFILT_MACHPORT", + "EVFILT_PROC", + "EVFILT_READ", + "EVFILT_SIGNAL", + "EVFILT_SYSCOUNT", + "EVFILT_THREADMARKER", + "EVFILT_TIMER", + "EVFILT_USER", + "EVFILT_VM", + "EVFILT_VNODE", + "EVFILT_WRITE", + "EV_ADD", + "EV_CLEAR", + "EV_DELETE", + "EV_DISABLE", + "EV_DISPATCH", + "EV_DROP", + "EV_ENABLE", + "EV_EOF", + "EV_ERROR", + "EV_FLAG0", + "EV_FLAG1", + "EV_ONESHOT", + "EV_OOBAND", + "EV_POLL", + "EV_RECEIPT", + "EV_SYSFLAGS", + "EWINDOWS", + "EWOULDBLOCK", + "EXDEV", + "EXFULL", + "EXTA", + "EXTB", + "EXTPROC", + "Environ", + "EpollCreate", + "EpollCreate1", + "EpollCtl", + "EpollEvent", + "EpollWait", + "Errno", + "EscapeArg", + "Exchangedata", + "Exec", + "Exit", + "ExitProcess", + "FD_CLOEXEC", + "FD_SETSIZE", + "FILE_ACTION_ADDED", + "FILE_ACTION_MODIFIED", + "FILE_ACTION_REMOVED", + "FILE_ACTION_RENAMED_NEW_NAME", + "FILE_ACTION_RENAMED_OLD_NAME", + "FILE_APPEND_DATA", + "FILE_ATTRIBUTE_ARCHIVE", + "FILE_ATTRIBUTE_DIRECTORY", + "FILE_ATTRIBUTE_HIDDEN", + "FILE_ATTRIBUTE_NORMAL", + "FILE_ATTRIBUTE_READONLY", + "FILE_ATTRIBUTE_REPARSE_POINT", + "FILE_ATTRIBUTE_SYSTEM", + "FILE_BEGIN", + "FILE_CURRENT", + "FILE_END", + "FILE_FLAG_BACKUP_SEMANTICS", + "FILE_FLAG_OPEN_REPARSE_POINT", + "FILE_FLAG_OVERLAPPED", + "FILE_LIST_DIRECTORY", + "FILE_MAP_COPY", + "FILE_MAP_EXECUTE", + "FILE_MAP_READ", + "FILE_MAP_WRITE", + "FILE_NOTIFY_CHANGE_ATTRIBUTES", + "FILE_NOTIFY_CHANGE_CREATION", + "FILE_NOTIFY_CHANGE_DIR_NAME", + "FILE_NOTIFY_CHANGE_FILE_NAME", + "FILE_NOTIFY_CHANGE_LAST_ACCESS", + "FILE_NOTIFY_CHANGE_LAST_WRITE", + "FILE_NOTIFY_CHANGE_SIZE", + "FILE_SHARE_DELETE", + "FILE_SHARE_READ", + "FILE_SHARE_WRITE", + "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", + "FILE_SKIP_SET_EVENT_ON_HANDLE", + "FILE_TYPE_CHAR", + "FILE_TYPE_DISK", + "FILE_TYPE_PIPE", + "FILE_TYPE_REMOTE", + "FILE_TYPE_UNKNOWN", + "FILE_WRITE_ATTRIBUTES", + "FLUSHO", + "FORMAT_MESSAGE_ALLOCATE_BUFFER", + "FORMAT_MESSAGE_ARGUMENT_ARRAY", + "FORMAT_MESSAGE_FROM_HMODULE", + "FORMAT_MESSAGE_FROM_STRING", + "FORMAT_MESSAGE_FROM_SYSTEM", + "FORMAT_MESSAGE_IGNORE_INSERTS", + 
"FORMAT_MESSAGE_MAX_WIDTH_MASK", + "FSCTL_GET_REPARSE_POINT", + "F_ADDFILESIGS", + "F_ADDSIGS", + "F_ALLOCATEALL", + "F_ALLOCATECONTIG", + "F_CANCEL", + "F_CHKCLEAN", + "F_CLOSEM", + "F_DUP2FD", + "F_DUP2FD_CLOEXEC", + "F_DUPFD", + "F_DUPFD_CLOEXEC", + "F_EXLCK", + "F_FLUSH_DATA", + "F_FREEZE_FS", + "F_FSCTL", + "F_FSDIRMASK", + "F_FSIN", + "F_FSINOUT", + "F_FSOUT", + "F_FSPRIV", + "F_FSVOID", + "F_FULLFSYNC", + "F_GETFD", + "F_GETFL", + "F_GETLEASE", + "F_GETLK", + "F_GETLK64", + "F_GETLKPID", + "F_GETNOSIGPIPE", + "F_GETOWN", + "F_GETOWN_EX", + "F_GETPATH", + "F_GETPATH_MTMINFO", + "F_GETPIPE_SZ", + "F_GETPROTECTIONCLASS", + "F_GETSIG", + "F_GLOBAL_NOCACHE", + "F_LOCK", + "F_LOG2PHYS", + "F_LOG2PHYS_EXT", + "F_MARKDEPENDENCY", + "F_MAXFD", + "F_NOCACHE", + "F_NODIRECT", + "F_NOTIFY", + "F_OGETLK", + "F_OK", + "F_OSETLK", + "F_OSETLKW", + "F_PARAM_MASK", + "F_PARAM_MAX", + "F_PATHPKG_CHECK", + "F_PEOFPOSMODE", + "F_PREALLOCATE", + "F_RDADVISE", + "F_RDAHEAD", + "F_RDLCK", + "F_READAHEAD", + "F_READBOOTSTRAP", + "F_SETBACKINGSTORE", + "F_SETFD", + "F_SETFL", + "F_SETLEASE", + "F_SETLK", + "F_SETLK64", + "F_SETLKW", + "F_SETLKW64", + "F_SETLK_REMOTE", + "F_SETNOSIGPIPE", + "F_SETOWN", + "F_SETOWN_EX", + "F_SETPIPE_SZ", + "F_SETPROTECTIONCLASS", + "F_SETSIG", + "F_SETSIZE", + "F_SHLCK", + "F_TEST", + "F_THAW_FS", + "F_TLOCK", + "F_ULOCK", + "F_UNLCK", + "F_UNLCKSYS", + "F_VOLPOSMODE", + "F_WRITEBOOTSTRAP", + "F_WRLCK", + "Faccessat", + "Fallocate", + "Fbootstraptransfer_t", + "Fchdir", + "Fchflags", + "Fchmod", + "Fchmodat", + "Fchown", + "Fchownat", + "FcntlFlock", + "FdSet", + "Fdatasync", + "FileNotifyInformation", + "Filetime", + "FindClose", + "FindFirstFile", + "FindNextFile", + "Flock", + "Flock_t", + "FlushBpf", + "FlushFileBuffers", + "FlushViewOfFile", + "ForkExec", + "ForkLock", + "FormatMessage", + "Fpathconf", + "FreeAddrInfoW", + "FreeEnvironmentStrings", + "FreeLibrary", + "Fsid", + "Fstat", + "Fstatat", + "Fstatfs", + "Fstore_t", + "Fsync", + "Ftruncate", + "FullPath", + "Futimes", + "Futimesat", + "GENERIC_ALL", + "GENERIC_EXECUTE", + "GENERIC_READ", + "GENERIC_WRITE", + "GUID", + "GetAcceptExSockaddrs", + "GetAdaptersInfo", + "GetAddrInfoW", + "GetCommandLine", + "GetComputerName", + "GetConsoleMode", + "GetCurrentDirectory", + "GetCurrentProcess", + "GetEnvironmentStrings", + "GetEnvironmentVariable", + "GetExitCodeProcess", + "GetFileAttributes", + "GetFileAttributesEx", + "GetFileExInfoStandard", + "GetFileExMaxInfoLevel", + "GetFileInformationByHandle", + "GetFileType", + "GetFullPathName", + "GetHostByName", + "GetIfEntry", + "GetLastError", + "GetLengthSid", + "GetLongPathName", + "GetProcAddress", + "GetProcessTimes", + "GetProtoByName", + "GetQueuedCompletionStatus", + "GetServByName", + "GetShortPathName", + "GetStartupInfo", + "GetStdHandle", + "GetSystemTimeAsFileTime", + "GetTempPath", + "GetTimeZoneInformation", + "GetTokenInformation", + "GetUserNameEx", + "GetUserProfileDirectory", + "GetVersion", + "Getcwd", + "Getdents", + "Getdirentries", + "Getdtablesize", + "Getegid", + "Getenv", + "Geteuid", + "Getfsstat", + "Getgid", + "Getgroups", + "Getpagesize", + "Getpeername", + "Getpgid", + "Getpgrp", + "Getpid", + "Getppid", + "Getpriority", + "Getrlimit", + "Getrusage", + "Getsid", + "Getsockname", + "Getsockopt", + "GetsockoptByte", + "GetsockoptICMPv6Filter", + "GetsockoptIPMreq", + "GetsockoptIPMreqn", + "GetsockoptIPv6MTUInfo", + "GetsockoptIPv6Mreq", + "GetsockoptInet4Addr", + "GetsockoptInt", + "GetsockoptUcred", + "Gettid", + "Gettimeofday", + "Getuid", 
+ "Getwd", + "Getxattr", + "HANDLE_FLAG_INHERIT", + "HKEY_CLASSES_ROOT", + "HKEY_CURRENT_CONFIG", + "HKEY_CURRENT_USER", + "HKEY_DYN_DATA", + "HKEY_LOCAL_MACHINE", + "HKEY_PERFORMANCE_DATA", + "HKEY_USERS", + "HUPCL", + "Handle", + "Hostent", + "ICANON", + "ICMP6_FILTER", + "ICMPV6_FILTER", + "ICMPv6Filter", + "ICRNL", + "IEXTEN", + "IFAN_ARRIVAL", + "IFAN_DEPARTURE", + "IFA_ADDRESS", + "IFA_ANYCAST", + "IFA_BROADCAST", + "IFA_CACHEINFO", + "IFA_F_DADFAILED", + "IFA_F_DEPRECATED", + "IFA_F_HOMEADDRESS", + "IFA_F_NODAD", + "IFA_F_OPTIMISTIC", + "IFA_F_PERMANENT", + "IFA_F_SECONDARY", + "IFA_F_TEMPORARY", + "IFA_F_TENTATIVE", + "IFA_LABEL", + "IFA_LOCAL", + "IFA_MAX", + "IFA_MULTICAST", + "IFA_ROUTE", + "IFA_UNSPEC", + "IFF_ALLMULTI", + "IFF_ALTPHYS", + "IFF_AUTOMEDIA", + "IFF_BROADCAST", + "IFF_CANTCHANGE", + "IFF_CANTCONFIG", + "IFF_DEBUG", + "IFF_DRV_OACTIVE", + "IFF_DRV_RUNNING", + "IFF_DYING", + "IFF_DYNAMIC", + "IFF_LINK0", + "IFF_LINK1", + "IFF_LINK2", + "IFF_LOOPBACK", + "IFF_MASTER", + "IFF_MONITOR", + "IFF_MULTICAST", + "IFF_NOARP", + "IFF_NOTRAILERS", + "IFF_NO_PI", + "IFF_OACTIVE", + "IFF_ONE_QUEUE", + "IFF_POINTOPOINT", + "IFF_POINTTOPOINT", + "IFF_PORTSEL", + "IFF_PPROMISC", + "IFF_PROMISC", + "IFF_RENAMING", + "IFF_RUNNING", + "IFF_SIMPLEX", + "IFF_SLAVE", + "IFF_SMART", + "IFF_STATICARP", + "IFF_TAP", + "IFF_TUN", + "IFF_TUN_EXCL", + "IFF_UP", + "IFF_VNET_HDR", + "IFLA_ADDRESS", + "IFLA_BROADCAST", + "IFLA_COST", + "IFLA_IFALIAS", + "IFLA_IFNAME", + "IFLA_LINK", + "IFLA_LINKINFO", + "IFLA_LINKMODE", + "IFLA_MAP", + "IFLA_MASTER", + "IFLA_MAX", + "IFLA_MTU", + "IFLA_NET_NS_PID", + "IFLA_OPERSTATE", + "IFLA_PRIORITY", + "IFLA_PROTINFO", + "IFLA_QDISC", + "IFLA_STATS", + "IFLA_TXQLEN", + "IFLA_UNSPEC", + "IFLA_WEIGHT", + "IFLA_WIRELESS", + "IFNAMSIZ", + "IFT_1822", + "IFT_A12MPPSWITCH", + "IFT_AAL2", + "IFT_AAL5", + "IFT_ADSL", + "IFT_AFLANE8023", + "IFT_AFLANE8025", + "IFT_ARAP", + "IFT_ARCNET", + "IFT_ARCNETPLUS", + "IFT_ASYNC", + "IFT_ATM", + "IFT_ATMDXI", + "IFT_ATMFUNI", + "IFT_ATMIMA", + "IFT_ATMLOGICAL", + "IFT_ATMRADIO", + "IFT_ATMSUBINTERFACE", + "IFT_ATMVCIENDPT", + "IFT_ATMVIRTUAL", + "IFT_BGPPOLICYACCOUNTING", + "IFT_BLUETOOTH", + "IFT_BRIDGE", + "IFT_BSC", + "IFT_CARP", + "IFT_CCTEMUL", + "IFT_CELLULAR", + "IFT_CEPT", + "IFT_CES", + "IFT_CHANNEL", + "IFT_CNR", + "IFT_COFFEE", + "IFT_COMPOSITELINK", + "IFT_DCN", + "IFT_DIGITALPOWERLINE", + "IFT_DIGITALWRAPPEROVERHEADCHANNEL", + "IFT_DLSW", + "IFT_DOCSCABLEDOWNSTREAM", + "IFT_DOCSCABLEMACLAYER", + "IFT_DOCSCABLEUPSTREAM", + "IFT_DOCSCABLEUPSTREAMCHANNEL", + "IFT_DS0", + "IFT_DS0BUNDLE", + "IFT_DS1FDL", + "IFT_DS3", + "IFT_DTM", + "IFT_DUMMY", + "IFT_DVBASILN", + "IFT_DVBASIOUT", + "IFT_DVBRCCDOWNSTREAM", + "IFT_DVBRCCMACLAYER", + "IFT_DVBRCCUPSTREAM", + "IFT_ECONET", + "IFT_ENC", + "IFT_EON", + "IFT_EPLRS", + "IFT_ESCON", + "IFT_ETHER", + "IFT_FAITH", + "IFT_FAST", + "IFT_FASTETHER", + "IFT_FASTETHERFX", + "IFT_FDDI", + "IFT_FIBRECHANNEL", + "IFT_FRAMERELAYINTERCONNECT", + "IFT_FRAMERELAYMPI", + "IFT_FRDLCIENDPT", + "IFT_FRELAY", + "IFT_FRELAYDCE", + "IFT_FRF16MFRBUNDLE", + "IFT_FRFORWARD", + "IFT_G703AT2MB", + "IFT_G703AT64K", + "IFT_GIF", + "IFT_GIGABITETHERNET", + "IFT_GR303IDT", + "IFT_GR303RDT", + "IFT_H323GATEKEEPER", + "IFT_H323PROXY", + "IFT_HDH1822", + "IFT_HDLC", + "IFT_HDSL2", + "IFT_HIPERLAN2", + "IFT_HIPPI", + "IFT_HIPPIINTERFACE", + "IFT_HOSTPAD", + "IFT_HSSI", + "IFT_HY", + "IFT_IBM370PARCHAN", + "IFT_IDSL", + "IFT_IEEE1394", + "IFT_IEEE80211", + "IFT_IEEE80212", + "IFT_IEEE8023ADLAG", + "IFT_IFGSN", + 
"IFT_IMT", + "IFT_INFINIBAND", + "IFT_INTERLEAVE", + "IFT_IP", + "IFT_IPFORWARD", + "IFT_IPOVERATM", + "IFT_IPOVERCDLC", + "IFT_IPOVERCLAW", + "IFT_IPSWITCH", + "IFT_IPXIP", + "IFT_ISDN", + "IFT_ISDNBASIC", + "IFT_ISDNPRIMARY", + "IFT_ISDNS", + "IFT_ISDNU", + "IFT_ISO88022LLC", + "IFT_ISO88023", + "IFT_ISO88024", + "IFT_ISO88025", + "IFT_ISO88025CRFPINT", + "IFT_ISO88025DTR", + "IFT_ISO88025FIBER", + "IFT_ISO88026", + "IFT_ISUP", + "IFT_L2VLAN", + "IFT_L3IPVLAN", + "IFT_L3IPXVLAN", + "IFT_LAPB", + "IFT_LAPD", + "IFT_LAPF", + "IFT_LINEGROUP", + "IFT_LOCALTALK", + "IFT_LOOP", + "IFT_MEDIAMAILOVERIP", + "IFT_MFSIGLINK", + "IFT_MIOX25", + "IFT_MODEM", + "IFT_MPC", + "IFT_MPLS", + "IFT_MPLSTUNNEL", + "IFT_MSDSL", + "IFT_MVL", + "IFT_MYRINET", + "IFT_NFAS", + "IFT_NSIP", + "IFT_OPTICALCHANNEL", + "IFT_OPTICALTRANSPORT", + "IFT_OTHER", + "IFT_P10", + "IFT_P80", + "IFT_PARA", + "IFT_PDP", + "IFT_PFLOG", + "IFT_PFLOW", + "IFT_PFSYNC", + "IFT_PLC", + "IFT_PON155", + "IFT_PON622", + "IFT_POS", + "IFT_PPP", + "IFT_PPPMULTILINKBUNDLE", + "IFT_PROPATM", + "IFT_PROPBWAP2MP", + "IFT_PROPCNLS", + "IFT_PROPDOCSWIRELESSDOWNSTREAM", + "IFT_PROPDOCSWIRELESSMACLAYER", + "IFT_PROPDOCSWIRELESSUPSTREAM", + "IFT_PROPMUX", + "IFT_PROPVIRTUAL", + "IFT_PROPWIRELESSP2P", + "IFT_PTPSERIAL", + "IFT_PVC", + "IFT_Q2931", + "IFT_QLLC", + "IFT_RADIOMAC", + "IFT_RADSL", + "IFT_REACHDSL", + "IFT_RFC1483", + "IFT_RS232", + "IFT_RSRB", + "IFT_SDLC", + "IFT_SDSL", + "IFT_SHDSL", + "IFT_SIP", + "IFT_SIPSIG", + "IFT_SIPTG", + "IFT_SLIP", + "IFT_SMDSDXI", + "IFT_SMDSICIP", + "IFT_SONET", + "IFT_SONETOVERHEADCHANNEL", + "IFT_SONETPATH", + "IFT_SONETVT", + "IFT_SRP", + "IFT_SS7SIGLINK", + "IFT_STACKTOSTACK", + "IFT_STARLAN", + "IFT_STF", + "IFT_T1", + "IFT_TDLC", + "IFT_TELINK", + "IFT_TERMPAD", + "IFT_TR008", + "IFT_TRANSPHDLC", + "IFT_TUNNEL", + "IFT_ULTRA", + "IFT_USB", + "IFT_V11", + "IFT_V35", + "IFT_V36", + "IFT_V37", + "IFT_VDSL", + "IFT_VIRTUALIPADDRESS", + "IFT_VIRTUALTG", + "IFT_VOICEDID", + "IFT_VOICEEM", + "IFT_VOICEEMFGD", + "IFT_VOICEENCAP", + "IFT_VOICEFGDEANA", + "IFT_VOICEFXO", + "IFT_VOICEFXS", + "IFT_VOICEOVERATM", + "IFT_VOICEOVERCABLE", + "IFT_VOICEOVERFRAMERELAY", + "IFT_VOICEOVERIP", + "IFT_X213", + "IFT_X25", + "IFT_X25DDN", + "IFT_X25HUNTGROUP", + "IFT_X25MLP", + "IFT_X25PLE", + "IFT_XETHER", + "IGNBRK", + "IGNCR", + "IGNORE", + "IGNPAR", + "IMAXBEL", + "INFINITE", + "INLCR", + "INPCK", + "INVALID_FILE_ATTRIBUTES", + "IN_ACCESS", + "IN_ALL_EVENTS", + "IN_ATTRIB", + "IN_CLASSA_HOST", + "IN_CLASSA_MAX", + "IN_CLASSA_NET", + "IN_CLASSA_NSHIFT", + "IN_CLASSB_HOST", + "IN_CLASSB_MAX", + "IN_CLASSB_NET", + "IN_CLASSB_NSHIFT", + "IN_CLASSC_HOST", + "IN_CLASSC_NET", + "IN_CLASSC_NSHIFT", + "IN_CLASSD_HOST", + "IN_CLASSD_NET", + "IN_CLASSD_NSHIFT", + "IN_CLOEXEC", + "IN_CLOSE", + "IN_CLOSE_NOWRITE", + "IN_CLOSE_WRITE", + "IN_CREATE", + "IN_DELETE", + "IN_DELETE_SELF", + "IN_DONT_FOLLOW", + "IN_EXCL_UNLINK", + "IN_IGNORED", + "IN_ISDIR", + "IN_LINKLOCALNETNUM", + "IN_LOOPBACKNET", + "IN_MASK_ADD", + "IN_MODIFY", + "IN_MOVE", + "IN_MOVED_FROM", + "IN_MOVED_TO", + "IN_MOVE_SELF", + "IN_NONBLOCK", + "IN_ONESHOT", + "IN_ONLYDIR", + "IN_OPEN", + "IN_Q_OVERFLOW", + "IN_RFC3021_HOST", + "IN_RFC3021_MASK", + "IN_RFC3021_NET", + "IN_RFC3021_NSHIFT", + "IN_UNMOUNT", + "IOC_IN", + "IOC_INOUT", + "IOC_OUT", + "IOC_VENDOR", + "IOC_WS2", + "IO_REPARSE_TAG_SYMLINK", + "IPMreq", + "IPMreqn", + "IPPROTO_3PC", + "IPPROTO_ADFS", + "IPPROTO_AH", + "IPPROTO_AHIP", + "IPPROTO_APES", + "IPPROTO_ARGUS", + "IPPROTO_AX25", + "IPPROTO_BHA", + 
"IPPROTO_BLT", + "IPPROTO_BRSATMON", + "IPPROTO_CARP", + "IPPROTO_CFTP", + "IPPROTO_CHAOS", + "IPPROTO_CMTP", + "IPPROTO_COMP", + "IPPROTO_CPHB", + "IPPROTO_CPNX", + "IPPROTO_DCCP", + "IPPROTO_DDP", + "IPPROTO_DGP", + "IPPROTO_DIVERT", + "IPPROTO_DIVERT_INIT", + "IPPROTO_DIVERT_RESP", + "IPPROTO_DONE", + "IPPROTO_DSTOPTS", + "IPPROTO_EGP", + "IPPROTO_EMCON", + "IPPROTO_ENCAP", + "IPPROTO_EON", + "IPPROTO_ESP", + "IPPROTO_ETHERIP", + "IPPROTO_FRAGMENT", + "IPPROTO_GGP", + "IPPROTO_GMTP", + "IPPROTO_GRE", + "IPPROTO_HELLO", + "IPPROTO_HMP", + "IPPROTO_HOPOPTS", + "IPPROTO_ICMP", + "IPPROTO_ICMPV6", + "IPPROTO_IDP", + "IPPROTO_IDPR", + "IPPROTO_IDRP", + "IPPROTO_IGMP", + "IPPROTO_IGP", + "IPPROTO_IGRP", + "IPPROTO_IL", + "IPPROTO_INLSP", + "IPPROTO_INP", + "IPPROTO_IP", + "IPPROTO_IPCOMP", + "IPPROTO_IPCV", + "IPPROTO_IPEIP", + "IPPROTO_IPIP", + "IPPROTO_IPPC", + "IPPROTO_IPV4", + "IPPROTO_IPV6", + "IPPROTO_IPV6_ICMP", + "IPPROTO_IRTP", + "IPPROTO_KRYPTOLAN", + "IPPROTO_LARP", + "IPPROTO_LEAF1", + "IPPROTO_LEAF2", + "IPPROTO_MAX", + "IPPROTO_MAXID", + "IPPROTO_MEAS", + "IPPROTO_MH", + "IPPROTO_MHRP", + "IPPROTO_MICP", + "IPPROTO_MOBILE", + "IPPROTO_MPLS", + "IPPROTO_MTP", + "IPPROTO_MUX", + "IPPROTO_ND", + "IPPROTO_NHRP", + "IPPROTO_NONE", + "IPPROTO_NSP", + "IPPROTO_NVPII", + "IPPROTO_OLD_DIVERT", + "IPPROTO_OSPFIGP", + "IPPROTO_PFSYNC", + "IPPROTO_PGM", + "IPPROTO_PIGP", + "IPPROTO_PIM", + "IPPROTO_PRM", + "IPPROTO_PUP", + "IPPROTO_PVP", + "IPPROTO_RAW", + "IPPROTO_RCCMON", + "IPPROTO_RDP", + "IPPROTO_ROUTING", + "IPPROTO_RSVP", + "IPPROTO_RVD", + "IPPROTO_SATEXPAK", + "IPPROTO_SATMON", + "IPPROTO_SCCSP", + "IPPROTO_SCTP", + "IPPROTO_SDRP", + "IPPROTO_SEND", + "IPPROTO_SEP", + "IPPROTO_SKIP", + "IPPROTO_SPACER", + "IPPROTO_SRPC", + "IPPROTO_ST", + "IPPROTO_SVMTP", + "IPPROTO_SWIPE", + "IPPROTO_TCF", + "IPPROTO_TCP", + "IPPROTO_TLSP", + "IPPROTO_TP", + "IPPROTO_TPXX", + "IPPROTO_TRUNK1", + "IPPROTO_TRUNK2", + "IPPROTO_TTP", + "IPPROTO_UDP", + "IPPROTO_UDPLITE", + "IPPROTO_VINES", + "IPPROTO_VISA", + "IPPROTO_VMTP", + "IPPROTO_VRRP", + "IPPROTO_WBEXPAK", + "IPPROTO_WBMON", + "IPPROTO_WSN", + "IPPROTO_XNET", + "IPPROTO_XTP", + "IPV6_2292DSTOPTS", + "IPV6_2292HOPLIMIT", + "IPV6_2292HOPOPTS", + "IPV6_2292NEXTHOP", + "IPV6_2292PKTINFO", + "IPV6_2292PKTOPTIONS", + "IPV6_2292RTHDR", + "IPV6_ADDRFORM", + "IPV6_ADD_MEMBERSHIP", + "IPV6_AUTHHDR", + "IPV6_AUTH_LEVEL", + "IPV6_AUTOFLOWLABEL", + "IPV6_BINDANY", + "IPV6_BINDV6ONLY", + "IPV6_BOUND_IF", + "IPV6_CHECKSUM", + "IPV6_DEFAULT_MULTICAST_HOPS", + "IPV6_DEFAULT_MULTICAST_LOOP", + "IPV6_DEFHLIM", + "IPV6_DONTFRAG", + "IPV6_DROP_MEMBERSHIP", + "IPV6_DSTOPTS", + "IPV6_ESP_NETWORK_LEVEL", + "IPV6_ESP_TRANS_LEVEL", + "IPV6_FAITH", + "IPV6_FLOWINFO_MASK", + "IPV6_FLOWLABEL_MASK", + "IPV6_FRAGTTL", + "IPV6_FW_ADD", + "IPV6_FW_DEL", + "IPV6_FW_FLUSH", + "IPV6_FW_GET", + "IPV6_FW_ZERO", + "IPV6_HLIMDEC", + "IPV6_HOPLIMIT", + "IPV6_HOPOPTS", + "IPV6_IPCOMP_LEVEL", + "IPV6_IPSEC_POLICY", + "IPV6_JOIN_ANYCAST", + "IPV6_JOIN_GROUP", + "IPV6_LEAVE_ANYCAST", + "IPV6_LEAVE_GROUP", + "IPV6_MAXHLIM", + "IPV6_MAXOPTHDR", + "IPV6_MAXPACKET", + "IPV6_MAX_GROUP_SRC_FILTER", + "IPV6_MAX_MEMBERSHIPS", + "IPV6_MAX_SOCK_SRC_FILTER", + "IPV6_MIN_MEMBERSHIPS", + "IPV6_MMTU", + "IPV6_MSFILTER", + "IPV6_MTU", + "IPV6_MTU_DISCOVER", + "IPV6_MULTICAST_HOPS", + "IPV6_MULTICAST_IF", + "IPV6_MULTICAST_LOOP", + "IPV6_NEXTHOP", + "IPV6_OPTIONS", + "IPV6_PATHMTU", + "IPV6_PIPEX", + "IPV6_PKTINFO", + "IPV6_PMTUDISC_DO", + "IPV6_PMTUDISC_DONT", + "IPV6_PMTUDISC_PROBE", + 
"IPV6_PMTUDISC_WANT", + "IPV6_PORTRANGE", + "IPV6_PORTRANGE_DEFAULT", + "IPV6_PORTRANGE_HIGH", + "IPV6_PORTRANGE_LOW", + "IPV6_PREFER_TEMPADDR", + "IPV6_RECVDSTOPTS", + "IPV6_RECVDSTPORT", + "IPV6_RECVERR", + "IPV6_RECVHOPLIMIT", + "IPV6_RECVHOPOPTS", + "IPV6_RECVPATHMTU", + "IPV6_RECVPKTINFO", + "IPV6_RECVRTHDR", + "IPV6_RECVTCLASS", + "IPV6_ROUTER_ALERT", + "IPV6_RTABLE", + "IPV6_RTHDR", + "IPV6_RTHDRDSTOPTS", + "IPV6_RTHDR_LOOSE", + "IPV6_RTHDR_STRICT", + "IPV6_RTHDR_TYPE_0", + "IPV6_RXDSTOPTS", + "IPV6_RXHOPOPTS", + "IPV6_SOCKOPT_RESERVED1", + "IPV6_TCLASS", + "IPV6_UNICAST_HOPS", + "IPV6_USE_MIN_MTU", + "IPV6_V6ONLY", + "IPV6_VERSION", + "IPV6_VERSION_MASK", + "IPV6_XFRM_POLICY", + "IP_ADD_MEMBERSHIP", + "IP_ADD_SOURCE_MEMBERSHIP", + "IP_AUTH_LEVEL", + "IP_BINDANY", + "IP_BLOCK_SOURCE", + "IP_BOUND_IF", + "IP_DEFAULT_MULTICAST_LOOP", + "IP_DEFAULT_MULTICAST_TTL", + "IP_DF", + "IP_DIVERTFL", + "IP_DONTFRAG", + "IP_DROP_MEMBERSHIP", + "IP_DROP_SOURCE_MEMBERSHIP", + "IP_DUMMYNET3", + "IP_DUMMYNET_CONFIGURE", + "IP_DUMMYNET_DEL", + "IP_DUMMYNET_FLUSH", + "IP_DUMMYNET_GET", + "IP_EF", + "IP_ERRORMTU", + "IP_ESP_NETWORK_LEVEL", + "IP_ESP_TRANS_LEVEL", + "IP_FAITH", + "IP_FREEBIND", + "IP_FW3", + "IP_FW_ADD", + "IP_FW_DEL", + "IP_FW_FLUSH", + "IP_FW_GET", + "IP_FW_NAT_CFG", + "IP_FW_NAT_DEL", + "IP_FW_NAT_GET_CONFIG", + "IP_FW_NAT_GET_LOG", + "IP_FW_RESETLOG", + "IP_FW_TABLE_ADD", + "IP_FW_TABLE_DEL", + "IP_FW_TABLE_FLUSH", + "IP_FW_TABLE_GETSIZE", + "IP_FW_TABLE_LIST", + "IP_FW_ZERO", + "IP_HDRINCL", + "IP_IPCOMP_LEVEL", + "IP_IPSECFLOWINFO", + "IP_IPSEC_LOCAL_AUTH", + "IP_IPSEC_LOCAL_CRED", + "IP_IPSEC_LOCAL_ID", + "IP_IPSEC_POLICY", + "IP_IPSEC_REMOTE_AUTH", + "IP_IPSEC_REMOTE_CRED", + "IP_IPSEC_REMOTE_ID", + "IP_MAXPACKET", + "IP_MAX_GROUP_SRC_FILTER", + "IP_MAX_MEMBERSHIPS", + "IP_MAX_SOCK_MUTE_FILTER", + "IP_MAX_SOCK_SRC_FILTER", + "IP_MAX_SOURCE_FILTER", + "IP_MF", + "IP_MINFRAGSIZE", + "IP_MINTTL", + "IP_MIN_MEMBERSHIPS", + "IP_MSFILTER", + "IP_MSS", + "IP_MTU", + "IP_MTU_DISCOVER", + "IP_MULTICAST_IF", + "IP_MULTICAST_IFINDEX", + "IP_MULTICAST_LOOP", + "IP_MULTICAST_TTL", + "IP_MULTICAST_VIF", + "IP_NAT__XXX", + "IP_OFFMASK", + "IP_OLD_FW_ADD", + "IP_OLD_FW_DEL", + "IP_OLD_FW_FLUSH", + "IP_OLD_FW_GET", + "IP_OLD_FW_RESETLOG", + "IP_OLD_FW_ZERO", + "IP_ONESBCAST", + "IP_OPTIONS", + "IP_ORIGDSTADDR", + "IP_PASSSEC", + "IP_PIPEX", + "IP_PKTINFO", + "IP_PKTOPTIONS", + "IP_PMTUDISC", + "IP_PMTUDISC_DO", + "IP_PMTUDISC_DONT", + "IP_PMTUDISC_PROBE", + "IP_PMTUDISC_WANT", + "IP_PORTRANGE", + "IP_PORTRANGE_DEFAULT", + "IP_PORTRANGE_HIGH", + "IP_PORTRANGE_LOW", + "IP_RECVDSTADDR", + "IP_RECVDSTPORT", + "IP_RECVERR", + "IP_RECVIF", + "IP_RECVOPTS", + "IP_RECVORIGDSTADDR", + "IP_RECVPKTINFO", + "IP_RECVRETOPTS", + "IP_RECVRTABLE", + "IP_RECVTOS", + "IP_RECVTTL", + "IP_RETOPTS", + "IP_RF", + "IP_ROUTER_ALERT", + "IP_RSVP_OFF", + "IP_RSVP_ON", + "IP_RSVP_VIF_OFF", + "IP_RSVP_VIF_ON", + "IP_RTABLE", + "IP_SENDSRCADDR", + "IP_STRIPHDR", + "IP_TOS", + "IP_TRAFFIC_MGT_BACKGROUND", + "IP_TRANSPARENT", + "IP_TTL", + "IP_UNBLOCK_SOURCE", + "IP_XFRM_POLICY", + "IPv6MTUInfo", + "IPv6Mreq", + "ISIG", + "ISTRIP", + "IUCLC", + "IUTF8", + "IXANY", + "IXOFF", + "IXON", + "IfAddrmsg", + "IfAnnounceMsghdr", + "IfData", + "IfInfomsg", + "IfMsghdr", + "IfaMsghdr", + "IfmaMsghdr", + "IfmaMsghdr2", + "ImplementsGetwd", + "Inet4Pktinfo", + "Inet6Pktinfo", + "InotifyAddWatch", + "InotifyEvent", + "InotifyInit", + "InotifyInit1", + "InotifyRmWatch", + "InterfaceAddrMessage", + "InterfaceAnnounceMessage", + 
"InterfaceInfo", + "InterfaceMessage", + "InterfaceMulticastAddrMessage", + "InvalidHandle", + "Ioperm", + "Iopl", + "Iovec", + "IpAdapterInfo", + "IpAddrString", + "IpAddressString", + "IpMaskString", + "Issetugid", + "KEY_ALL_ACCESS", + "KEY_CREATE_LINK", + "KEY_CREATE_SUB_KEY", + "KEY_ENUMERATE_SUB_KEYS", + "KEY_EXECUTE", + "KEY_NOTIFY", + "KEY_QUERY_VALUE", + "KEY_READ", + "KEY_SET_VALUE", + "KEY_WOW64_32KEY", + "KEY_WOW64_64KEY", + "KEY_WRITE", + "Kevent", + "Kevent_t", + "Kill", + "Klogctl", + "Kqueue", + "LANG_ENGLISH", + "LAYERED_PROTOCOL", + "LCNT_OVERLOAD_FLUSH", + "LINUX_REBOOT_CMD_CAD_OFF", + "LINUX_REBOOT_CMD_CAD_ON", + "LINUX_REBOOT_CMD_HALT", + "LINUX_REBOOT_CMD_KEXEC", + "LINUX_REBOOT_CMD_POWER_OFF", + "LINUX_REBOOT_CMD_RESTART", + "LINUX_REBOOT_CMD_RESTART2", + "LINUX_REBOOT_CMD_SW_SUSPEND", + "LINUX_REBOOT_MAGIC1", + "LINUX_REBOOT_MAGIC2", + "LOCK_EX", + "LOCK_NB", + "LOCK_SH", + "LOCK_UN", + "LazyDLL", + "LazyProc", + "Lchown", + "Linger", + "Link", + "Listen", + "Listxattr", + "LoadCancelIoEx", + "LoadConnectEx", + "LoadCreateSymbolicLink", + "LoadDLL", + "LoadGetAddrInfo", + "LoadLibrary", + "LoadSetFileCompletionNotificationModes", + "LocalFree", + "Log2phys_t", + "LookupAccountName", + "LookupAccountSid", + "LookupSID", + "LsfJump", + "LsfSocket", + "LsfStmt", + "Lstat", + "MADV_AUTOSYNC", + "MADV_CAN_REUSE", + "MADV_CORE", + "MADV_DOFORK", + "MADV_DONTFORK", + "MADV_DONTNEED", + "MADV_FREE", + "MADV_FREE_REUSABLE", + "MADV_FREE_REUSE", + "MADV_HUGEPAGE", + "MADV_HWPOISON", + "MADV_MERGEABLE", + "MADV_NOCORE", + "MADV_NOHUGEPAGE", + "MADV_NORMAL", + "MADV_NOSYNC", + "MADV_PROTECT", + "MADV_RANDOM", + "MADV_REMOVE", + "MADV_SEQUENTIAL", + "MADV_SPACEAVAIL", + "MADV_UNMERGEABLE", + "MADV_WILLNEED", + "MADV_ZERO_WIRED_PAGES", + "MAP_32BIT", + "MAP_ALIGNED_SUPER", + "MAP_ALIGNMENT_16MB", + "MAP_ALIGNMENT_1TB", + "MAP_ALIGNMENT_256TB", + "MAP_ALIGNMENT_4GB", + "MAP_ALIGNMENT_64KB", + "MAP_ALIGNMENT_64PB", + "MAP_ALIGNMENT_MASK", + "MAP_ALIGNMENT_SHIFT", + "MAP_ANON", + "MAP_ANONYMOUS", + "MAP_COPY", + "MAP_DENYWRITE", + "MAP_EXECUTABLE", + "MAP_FILE", + "MAP_FIXED", + "MAP_FLAGMASK", + "MAP_GROWSDOWN", + "MAP_HASSEMAPHORE", + "MAP_HUGETLB", + "MAP_INHERIT", + "MAP_INHERIT_COPY", + "MAP_INHERIT_DEFAULT", + "MAP_INHERIT_DONATE_COPY", + "MAP_INHERIT_NONE", + "MAP_INHERIT_SHARE", + "MAP_JIT", + "MAP_LOCKED", + "MAP_NOCACHE", + "MAP_NOCORE", + "MAP_NOEXTEND", + "MAP_NONBLOCK", + "MAP_NORESERVE", + "MAP_NOSYNC", + "MAP_POPULATE", + "MAP_PREFAULT_READ", + "MAP_PRIVATE", + "MAP_RENAME", + "MAP_RESERVED0080", + "MAP_RESERVED0100", + "MAP_SHARED", + "MAP_STACK", + "MAP_TRYFIXED", + "MAP_TYPE", + "MAP_WIRED", + "MAXIMUM_REPARSE_DATA_BUFFER_SIZE", + "MAXLEN_IFDESCR", + "MAXLEN_PHYSADDR", + "MAX_ADAPTER_ADDRESS_LENGTH", + "MAX_ADAPTER_DESCRIPTION_LENGTH", + "MAX_ADAPTER_NAME_LENGTH", + "MAX_COMPUTERNAME_LENGTH", + "MAX_INTERFACE_NAME_LEN", + "MAX_LONG_PATH", + "MAX_PATH", + "MAX_PROTOCOL_CHAIN", + "MCL_CURRENT", + "MCL_FUTURE", + "MNT_DETACH", + "MNT_EXPIRE", + "MNT_FORCE", + "MSG_BCAST", + "MSG_CMSG_CLOEXEC", + "MSG_COMPAT", + "MSG_CONFIRM", + "MSG_CONTROLMBUF", + "MSG_CTRUNC", + "MSG_DONTROUTE", + "MSG_DONTWAIT", + "MSG_EOF", + "MSG_EOR", + "MSG_ERRQUEUE", + "MSG_FASTOPEN", + "MSG_FIN", + "MSG_FLUSH", + "MSG_HAVEMORE", + "MSG_HOLD", + "MSG_IOVUSRSPACE", + "MSG_LENUSRSPACE", + "MSG_MCAST", + "MSG_MORE", + "MSG_NAMEMBUF", + "MSG_NBIO", + "MSG_NEEDSA", + "MSG_NOSIGNAL", + "MSG_NOTIFICATION", + "MSG_OOB", + "MSG_PEEK", + "MSG_PROXY", + "MSG_RCVMORE", + "MSG_RST", + "MSG_SEND", + 
"MSG_SYN", + "MSG_TRUNC", + "MSG_TRYHARD", + "MSG_USERFLAGS", + "MSG_WAITALL", + "MSG_WAITFORONE", + "MSG_WAITSTREAM", + "MS_ACTIVE", + "MS_ASYNC", + "MS_BIND", + "MS_DEACTIVATE", + "MS_DIRSYNC", + "MS_INVALIDATE", + "MS_I_VERSION", + "MS_KERNMOUNT", + "MS_KILLPAGES", + "MS_MANDLOCK", + "MS_MGC_MSK", + "MS_MGC_VAL", + "MS_MOVE", + "MS_NOATIME", + "MS_NODEV", + "MS_NODIRATIME", + "MS_NOEXEC", + "MS_NOSUID", + "MS_NOUSER", + "MS_POSIXACL", + "MS_PRIVATE", + "MS_RDONLY", + "MS_REC", + "MS_RELATIME", + "MS_REMOUNT", + "MS_RMT_MASK", + "MS_SHARED", + "MS_SILENT", + "MS_SLAVE", + "MS_STRICTATIME", + "MS_SYNC", + "MS_SYNCHRONOUS", + "MS_UNBINDABLE", + "Madvise", + "MapViewOfFile", + "MaxTokenInfoClass", + "Mclpool", + "MibIfRow", + "Mkdir", + "Mkdirat", + "Mkfifo", + "Mknod", + "Mknodat", + "Mlock", + "Mlockall", + "Mmap", + "Mount", + "MoveFile", + "Mprotect", + "Msghdr", + "Munlock", + "Munlockall", + "Munmap", + "MustLoadDLL", + "NAME_MAX", + "NETLINK_ADD_MEMBERSHIP", + "NETLINK_AUDIT", + "NETLINK_BROADCAST_ERROR", + "NETLINK_CONNECTOR", + "NETLINK_DNRTMSG", + "NETLINK_DROP_MEMBERSHIP", + "NETLINK_ECRYPTFS", + "NETLINK_FIB_LOOKUP", + "NETLINK_FIREWALL", + "NETLINK_GENERIC", + "NETLINK_INET_DIAG", + "NETLINK_IP6_FW", + "NETLINK_ISCSI", + "NETLINK_KOBJECT_UEVENT", + "NETLINK_NETFILTER", + "NETLINK_NFLOG", + "NETLINK_NO_ENOBUFS", + "NETLINK_PKTINFO", + "NETLINK_RDMA", + "NETLINK_ROUTE", + "NETLINK_SCSITRANSPORT", + "NETLINK_SELINUX", + "NETLINK_UNUSED", + "NETLINK_USERSOCK", + "NETLINK_XFRM", + "NET_RT_DUMP", + "NET_RT_DUMP2", + "NET_RT_FLAGS", + "NET_RT_IFLIST", + "NET_RT_IFLIST2", + "NET_RT_IFLISTL", + "NET_RT_IFMALIST", + "NET_RT_MAXID", + "NET_RT_OIFLIST", + "NET_RT_OOIFLIST", + "NET_RT_STAT", + "NET_RT_STATS", + "NET_RT_TABLE", + "NET_RT_TRASH", + "NLA_ALIGNTO", + "NLA_F_NESTED", + "NLA_F_NET_BYTEORDER", + "NLA_HDRLEN", + "NLMSG_ALIGNTO", + "NLMSG_DONE", + "NLMSG_ERROR", + "NLMSG_HDRLEN", + "NLMSG_MIN_TYPE", + "NLMSG_NOOP", + "NLMSG_OVERRUN", + "NLM_F_ACK", + "NLM_F_APPEND", + "NLM_F_ATOMIC", + "NLM_F_CREATE", + "NLM_F_DUMP", + "NLM_F_ECHO", + "NLM_F_EXCL", + "NLM_F_MATCH", + "NLM_F_MULTI", + "NLM_F_REPLACE", + "NLM_F_REQUEST", + "NLM_F_ROOT", + "NOFLSH", + "NOTE_ABSOLUTE", + "NOTE_ATTRIB", + "NOTE_CHILD", + "NOTE_DELETE", + "NOTE_EOF", + "NOTE_EXEC", + "NOTE_EXIT", + "NOTE_EXITSTATUS", + "NOTE_EXTEND", + "NOTE_FFAND", + "NOTE_FFCOPY", + "NOTE_FFCTRLMASK", + "NOTE_FFLAGSMASK", + "NOTE_FFNOP", + "NOTE_FFOR", + "NOTE_FORK", + "NOTE_LINK", + "NOTE_LOWAT", + "NOTE_NONE", + "NOTE_NSECONDS", + "NOTE_PCTRLMASK", + "NOTE_PDATAMASK", + "NOTE_REAP", + "NOTE_RENAME", + "NOTE_RESOURCEEND", + "NOTE_REVOKE", + "NOTE_SECONDS", + "NOTE_SIGNAL", + "NOTE_TRACK", + "NOTE_TRACKERR", + "NOTE_TRIGGER", + "NOTE_TRUNCATE", + "NOTE_USECONDS", + "NOTE_VM_ERROR", + "NOTE_VM_PRESSURE", + "NOTE_VM_PRESSURE_SUDDEN_TERMINATE", + "NOTE_VM_PRESSURE_TERMINATE", + "NOTE_WRITE", + "NameCanonical", + "NameCanonicalEx", + "NameDisplay", + "NameDnsDomain", + "NameFullyQualifiedDN", + "NameSamCompatible", + "NameServicePrincipal", + "NameUniqueId", + "NameUnknown", + "NameUserPrincipal", + "Nanosleep", + "NetApiBufferFree", + "NetGetJoinInformation", + "NetSetupDomainName", + "NetSetupUnjoined", + "NetSetupUnknownStatus", + "NetSetupWorkgroupName", + "NetUserGetInfo", + "NetlinkMessage", + "NetlinkRIB", + "NetlinkRouteAttr", + "NetlinkRouteRequest", + "NewCallback", + "NewCallbackCDecl", + "NewLazyDLL", + "NlAttr", + "NlMsgerr", + "NlMsghdr", + "NsecToFiletime", + "NsecToTimespec", + "NsecToTimeval", + "Ntohs", + "OCRNL", + 
"OFDEL", + "OFILL", + "OFIOGETBMAP", + "OID_PKIX_KP_SERVER_AUTH", + "OID_SERVER_GATED_CRYPTO", + "OID_SGC_NETSCAPE", + "OLCUC", + "ONLCR", + "ONLRET", + "ONOCR", + "ONOEOT", + "OPEN_ALWAYS", + "OPEN_EXISTING", + "OPOST", + "O_ACCMODE", + "O_ALERT", + "O_ALT_IO", + "O_APPEND", + "O_ASYNC", + "O_CLOEXEC", + "O_CREAT", + "O_DIRECT", + "O_DIRECTORY", + "O_DSYNC", + "O_EVTONLY", + "O_EXCL", + "O_EXEC", + "O_EXLOCK", + "O_FSYNC", + "O_LARGEFILE", + "O_NDELAY", + "O_NOATIME", + "O_NOCTTY", + "O_NOFOLLOW", + "O_NONBLOCK", + "O_NOSIGPIPE", + "O_POPUP", + "O_RDONLY", + "O_RDWR", + "O_RSYNC", + "O_SHLOCK", + "O_SYMLINK", + "O_SYNC", + "O_TRUNC", + "O_TTY_INIT", + "O_WRONLY", + "Open", + "OpenCurrentProcessToken", + "OpenProcess", + "OpenProcessToken", + "Openat", + "Overlapped", + "PACKET_ADD_MEMBERSHIP", + "PACKET_BROADCAST", + "PACKET_DROP_MEMBERSHIP", + "PACKET_FASTROUTE", + "PACKET_HOST", + "PACKET_LOOPBACK", + "PACKET_MR_ALLMULTI", + "PACKET_MR_MULTICAST", + "PACKET_MR_PROMISC", + "PACKET_MULTICAST", + "PACKET_OTHERHOST", + "PACKET_OUTGOING", + "PACKET_RECV_OUTPUT", + "PACKET_RX_RING", + "PACKET_STATISTICS", + "PAGE_EXECUTE_READ", + "PAGE_EXECUTE_READWRITE", + "PAGE_EXECUTE_WRITECOPY", + "PAGE_READONLY", + "PAGE_READWRITE", + "PAGE_WRITECOPY", + "PARENB", + "PARMRK", + "PARODD", + "PENDIN", + "PFL_HIDDEN", + "PFL_MATCHES_PROTOCOL_ZERO", + "PFL_MULTIPLE_PROTO_ENTRIES", + "PFL_NETWORKDIRECT_PROVIDER", + "PFL_RECOMMENDED_PROTO_ENTRY", + "PF_FLUSH", + "PKCS_7_ASN_ENCODING", + "PMC5_PIPELINE_FLUSH", + "PRIO_PGRP", + "PRIO_PROCESS", + "PRIO_USER", + "PRI_IOFLUSH", + "PROCESS_QUERY_INFORMATION", + "PROCESS_TERMINATE", + "PROT_EXEC", + "PROT_GROWSDOWN", + "PROT_GROWSUP", + "PROT_NONE", + "PROT_READ", + "PROT_WRITE", + "PROV_DH_SCHANNEL", + "PROV_DSS", + "PROV_DSS_DH", + "PROV_EC_ECDSA_FULL", + "PROV_EC_ECDSA_SIG", + "PROV_EC_ECNRA_FULL", + "PROV_EC_ECNRA_SIG", + "PROV_FORTEZZA", + "PROV_INTEL_SEC", + "PROV_MS_EXCHANGE", + "PROV_REPLACE_OWF", + "PROV_RNG", + "PROV_RSA_AES", + "PROV_RSA_FULL", + "PROV_RSA_SCHANNEL", + "PROV_RSA_SIG", + "PROV_SPYRUS_LYNKS", + "PROV_SSL", + "PR_CAPBSET_DROP", + "PR_CAPBSET_READ", + "PR_CLEAR_SECCOMP_FILTER", + "PR_ENDIAN_BIG", + "PR_ENDIAN_LITTLE", + "PR_ENDIAN_PPC_LITTLE", + "PR_FPEMU_NOPRINT", + "PR_FPEMU_SIGFPE", + "PR_FP_EXC_ASYNC", + "PR_FP_EXC_DISABLED", + "PR_FP_EXC_DIV", + "PR_FP_EXC_INV", + "PR_FP_EXC_NONRECOV", + "PR_FP_EXC_OVF", + "PR_FP_EXC_PRECISE", + "PR_FP_EXC_RES", + "PR_FP_EXC_SW_ENABLE", + "PR_FP_EXC_UND", + "PR_GET_DUMPABLE", + "PR_GET_ENDIAN", + "PR_GET_FPEMU", + "PR_GET_FPEXC", + "PR_GET_KEEPCAPS", + "PR_GET_NAME", + "PR_GET_PDEATHSIG", + "PR_GET_SECCOMP", + "PR_GET_SECCOMP_FILTER", + "PR_GET_SECUREBITS", + "PR_GET_TIMERSLACK", + "PR_GET_TIMING", + "PR_GET_TSC", + "PR_GET_UNALIGN", + "PR_MCE_KILL", + "PR_MCE_KILL_CLEAR", + "PR_MCE_KILL_DEFAULT", + "PR_MCE_KILL_EARLY", + "PR_MCE_KILL_GET", + "PR_MCE_KILL_LATE", + "PR_MCE_KILL_SET", + "PR_SECCOMP_FILTER_EVENT", + "PR_SECCOMP_FILTER_SYSCALL", + "PR_SET_DUMPABLE", + "PR_SET_ENDIAN", + "PR_SET_FPEMU", + "PR_SET_FPEXC", + "PR_SET_KEEPCAPS", + "PR_SET_NAME", + "PR_SET_PDEATHSIG", + "PR_SET_PTRACER", + "PR_SET_SECCOMP", + "PR_SET_SECCOMP_FILTER", + "PR_SET_SECUREBITS", + "PR_SET_TIMERSLACK", + "PR_SET_TIMING", + "PR_SET_TSC", + "PR_SET_UNALIGN", + "PR_TASK_PERF_EVENTS_DISABLE", + "PR_TASK_PERF_EVENTS_ENABLE", + "PR_TIMING_STATISTICAL", + "PR_TIMING_TIMESTAMP", + "PR_TSC_ENABLE", + "PR_TSC_SIGSEGV", + "PR_UNALIGN_NOPRINT", + "PR_UNALIGN_SIGBUS", + "PTRACE_ARCH_PRCTL", + "PTRACE_ATTACH", + "PTRACE_CONT", + 
"PTRACE_DETACH", + "PTRACE_EVENT_CLONE", + "PTRACE_EVENT_EXEC", + "PTRACE_EVENT_EXIT", + "PTRACE_EVENT_FORK", + "PTRACE_EVENT_VFORK", + "PTRACE_EVENT_VFORK_DONE", + "PTRACE_GETCRUNCHREGS", + "PTRACE_GETEVENTMSG", + "PTRACE_GETFPREGS", + "PTRACE_GETFPXREGS", + "PTRACE_GETHBPREGS", + "PTRACE_GETREGS", + "PTRACE_GETREGSET", + "PTRACE_GETSIGINFO", + "PTRACE_GETVFPREGS", + "PTRACE_GETWMMXREGS", + "PTRACE_GET_THREAD_AREA", + "PTRACE_KILL", + "PTRACE_OLDSETOPTIONS", + "PTRACE_O_MASK", + "PTRACE_O_TRACECLONE", + "PTRACE_O_TRACEEXEC", + "PTRACE_O_TRACEEXIT", + "PTRACE_O_TRACEFORK", + "PTRACE_O_TRACESYSGOOD", + "PTRACE_O_TRACEVFORK", + "PTRACE_O_TRACEVFORKDONE", + "PTRACE_PEEKDATA", + "PTRACE_PEEKTEXT", + "PTRACE_PEEKUSR", + "PTRACE_POKEDATA", + "PTRACE_POKETEXT", + "PTRACE_POKEUSR", + "PTRACE_SETCRUNCHREGS", + "PTRACE_SETFPREGS", + "PTRACE_SETFPXREGS", + "PTRACE_SETHBPREGS", + "PTRACE_SETOPTIONS", + "PTRACE_SETREGS", + "PTRACE_SETREGSET", + "PTRACE_SETSIGINFO", + "PTRACE_SETVFPREGS", + "PTRACE_SETWMMXREGS", + "PTRACE_SET_SYSCALL", + "PTRACE_SET_THREAD_AREA", + "PTRACE_SINGLEBLOCK", + "PTRACE_SINGLESTEP", + "PTRACE_SYSCALL", + "PTRACE_SYSEMU", + "PTRACE_SYSEMU_SINGLESTEP", + "PTRACE_TRACEME", + "PT_ATTACH", + "PT_ATTACHEXC", + "PT_CONTINUE", + "PT_DATA_ADDR", + "PT_DENY_ATTACH", + "PT_DETACH", + "PT_FIRSTMACH", + "PT_FORCEQUOTA", + "PT_KILL", + "PT_MASK", + "PT_READ_D", + "PT_READ_I", + "PT_READ_U", + "PT_SIGEXC", + "PT_STEP", + "PT_TEXT_ADDR", + "PT_TEXT_END_ADDR", + "PT_THUPDATE", + "PT_TRACE_ME", + "PT_WRITE_D", + "PT_WRITE_I", + "PT_WRITE_U", + "ParseDirent", + "ParseNetlinkMessage", + "ParseNetlinkRouteAttr", + "ParseRoutingMessage", + "ParseRoutingSockaddr", + "ParseSocketControlMessage", + "ParseUnixCredentials", + "ParseUnixRights", + "PathMax", + "Pathconf", + "Pause", + "Pipe", + "Pipe2", + "PivotRoot", + "Pointer", + "PostQueuedCompletionStatus", + "Pread", + "Proc", + "ProcAttr", + "Process32First", + "Process32Next", + "ProcessEntry32", + "ProcessInformation", + "Protoent", + "PtraceAttach", + "PtraceCont", + "PtraceDetach", + "PtraceGetEventMsg", + "PtraceGetRegs", + "PtracePeekData", + "PtracePeekText", + "PtracePokeData", + "PtracePokeText", + "PtraceRegs", + "PtraceSetOptions", + "PtraceSetRegs", + "PtraceSingleStep", + "PtraceSyscall", + "Pwrite", + "REG_BINARY", + "REG_DWORD", + "REG_DWORD_BIG_ENDIAN", + "REG_DWORD_LITTLE_ENDIAN", + "REG_EXPAND_SZ", + "REG_FULL_RESOURCE_DESCRIPTOR", + "REG_LINK", + "REG_MULTI_SZ", + "REG_NONE", + "REG_QWORD", + "REG_QWORD_LITTLE_ENDIAN", + "REG_RESOURCE_LIST", + "REG_RESOURCE_REQUIREMENTS_LIST", + "REG_SZ", + "RLIMIT_AS", + "RLIMIT_CORE", + "RLIMIT_CPU", + "RLIMIT_DATA", + "RLIMIT_FSIZE", + "RLIMIT_NOFILE", + "RLIMIT_STACK", + "RLIM_INFINITY", + "RTAX_ADVMSS", + "RTAX_AUTHOR", + "RTAX_BRD", + "RTAX_CWND", + "RTAX_DST", + "RTAX_FEATURES", + "RTAX_FEATURE_ALLFRAG", + "RTAX_FEATURE_ECN", + "RTAX_FEATURE_SACK", + "RTAX_FEATURE_TIMESTAMP", + "RTAX_GATEWAY", + "RTAX_GENMASK", + "RTAX_HOPLIMIT", + "RTAX_IFA", + "RTAX_IFP", + "RTAX_INITCWND", + "RTAX_INITRWND", + "RTAX_LABEL", + "RTAX_LOCK", + "RTAX_MAX", + "RTAX_MTU", + "RTAX_NETMASK", + "RTAX_REORDERING", + "RTAX_RTO_MIN", + "RTAX_RTT", + "RTAX_RTTVAR", + "RTAX_SRC", + "RTAX_SRCMASK", + "RTAX_SSTHRESH", + "RTAX_TAG", + "RTAX_UNSPEC", + "RTAX_WINDOW", + "RTA_ALIGNTO", + "RTA_AUTHOR", + "RTA_BRD", + "RTA_CACHEINFO", + "RTA_DST", + "RTA_FLOW", + "RTA_GATEWAY", + "RTA_GENMASK", + "RTA_IFA", + "RTA_IFP", + "RTA_IIF", + "RTA_LABEL", + "RTA_MAX", + "RTA_METRICS", + "RTA_MULTIPATH", + "RTA_NETMASK", + 
"RTA_OIF", + "RTA_PREFSRC", + "RTA_PRIORITY", + "RTA_SRC", + "RTA_SRCMASK", + "RTA_TABLE", + "RTA_TAG", + "RTA_UNSPEC", + "RTCF_DIRECTSRC", + "RTCF_DOREDIRECT", + "RTCF_LOG", + "RTCF_MASQ", + "RTCF_NAT", + "RTCF_VALVE", + "RTF_ADDRCLASSMASK", + "RTF_ADDRCONF", + "RTF_ALLONLINK", + "RTF_ANNOUNCE", + "RTF_BLACKHOLE", + "RTF_BROADCAST", + "RTF_CACHE", + "RTF_CLONED", + "RTF_CLONING", + "RTF_CONDEMNED", + "RTF_DEFAULT", + "RTF_DELCLONE", + "RTF_DONE", + "RTF_DYNAMIC", + "RTF_FLOW", + "RTF_FMASK", + "RTF_GATEWAY", + "RTF_GWFLAG_COMPAT", + "RTF_HOST", + "RTF_IFREF", + "RTF_IFSCOPE", + "RTF_INTERFACE", + "RTF_IRTT", + "RTF_LINKRT", + "RTF_LLDATA", + "RTF_LLINFO", + "RTF_LOCAL", + "RTF_MASK", + "RTF_MODIFIED", + "RTF_MPATH", + "RTF_MPLS", + "RTF_MSS", + "RTF_MTU", + "RTF_MULTICAST", + "RTF_NAT", + "RTF_NOFORWARD", + "RTF_NONEXTHOP", + "RTF_NOPMTUDISC", + "RTF_PERMANENT_ARP", + "RTF_PINNED", + "RTF_POLICY", + "RTF_PRCLONING", + "RTF_PROTO1", + "RTF_PROTO2", + "RTF_PROTO3", + "RTF_REINSTATE", + "RTF_REJECT", + "RTF_RNH_LOCKED", + "RTF_SOURCE", + "RTF_SRC", + "RTF_STATIC", + "RTF_STICKY", + "RTF_THROW", + "RTF_TUNNEL", + "RTF_UP", + "RTF_USETRAILERS", + "RTF_WASCLONED", + "RTF_WINDOW", + "RTF_XRESOLVE", + "RTM_ADD", + "RTM_BASE", + "RTM_CHANGE", + "RTM_CHGADDR", + "RTM_DELACTION", + "RTM_DELADDR", + "RTM_DELADDRLABEL", + "RTM_DELETE", + "RTM_DELLINK", + "RTM_DELMADDR", + "RTM_DELNEIGH", + "RTM_DELQDISC", + "RTM_DELROUTE", + "RTM_DELRULE", + "RTM_DELTCLASS", + "RTM_DELTFILTER", + "RTM_DESYNC", + "RTM_F_CLONED", + "RTM_F_EQUALIZE", + "RTM_F_NOTIFY", + "RTM_F_PREFIX", + "RTM_GET", + "RTM_GET2", + "RTM_GETACTION", + "RTM_GETADDR", + "RTM_GETADDRLABEL", + "RTM_GETANYCAST", + "RTM_GETDCB", + "RTM_GETLINK", + "RTM_GETMULTICAST", + "RTM_GETNEIGH", + "RTM_GETNEIGHTBL", + "RTM_GETQDISC", + "RTM_GETROUTE", + "RTM_GETRULE", + "RTM_GETTCLASS", + "RTM_GETTFILTER", + "RTM_IEEE80211", + "RTM_IFANNOUNCE", + "RTM_IFINFO", + "RTM_IFINFO2", + "RTM_LLINFO_UPD", + "RTM_LOCK", + "RTM_LOSING", + "RTM_MAX", + "RTM_MAXSIZE", + "RTM_MISS", + "RTM_NEWACTION", + "RTM_NEWADDR", + "RTM_NEWADDRLABEL", + "RTM_NEWLINK", + "RTM_NEWMADDR", + "RTM_NEWMADDR2", + "RTM_NEWNDUSEROPT", + "RTM_NEWNEIGH", + "RTM_NEWNEIGHTBL", + "RTM_NEWPREFIX", + "RTM_NEWQDISC", + "RTM_NEWROUTE", + "RTM_NEWRULE", + "RTM_NEWTCLASS", + "RTM_NEWTFILTER", + "RTM_NR_FAMILIES", + "RTM_NR_MSGTYPES", + "RTM_OIFINFO", + "RTM_OLDADD", + "RTM_OLDDEL", + "RTM_OOIFINFO", + "RTM_REDIRECT", + "RTM_RESOLVE", + "RTM_RTTUNIT", + "RTM_SETDCB", + "RTM_SETGATE", + "RTM_SETLINK", + "RTM_SETNEIGHTBL", + "RTM_VERSION", + "RTNH_ALIGNTO", + "RTNH_F_DEAD", + "RTNH_F_ONLINK", + "RTNH_F_PERVASIVE", + "RTNLGRP_IPV4_IFADDR", + "RTNLGRP_IPV4_MROUTE", + "RTNLGRP_IPV4_ROUTE", + "RTNLGRP_IPV4_RULE", + "RTNLGRP_IPV6_IFADDR", + "RTNLGRP_IPV6_IFINFO", + "RTNLGRP_IPV6_MROUTE", + "RTNLGRP_IPV6_PREFIX", + "RTNLGRP_IPV6_ROUTE", + "RTNLGRP_IPV6_RULE", + "RTNLGRP_LINK", + "RTNLGRP_ND_USEROPT", + "RTNLGRP_NEIGH", + "RTNLGRP_NONE", + "RTNLGRP_NOTIFY", + "RTNLGRP_TC", + "RTN_ANYCAST", + "RTN_BLACKHOLE", + "RTN_BROADCAST", + "RTN_LOCAL", + "RTN_MAX", + "RTN_MULTICAST", + "RTN_NAT", + "RTN_PROHIBIT", + "RTN_THROW", + "RTN_UNICAST", + "RTN_UNREACHABLE", + "RTN_UNSPEC", + "RTN_XRESOLVE", + "RTPROT_BIRD", + "RTPROT_BOOT", + "RTPROT_DHCP", + "RTPROT_DNROUTED", + "RTPROT_GATED", + "RTPROT_KERNEL", + "RTPROT_MRT", + "RTPROT_NTK", + "RTPROT_RA", + "RTPROT_REDIRECT", + "RTPROT_STATIC", + "RTPROT_UNSPEC", + "RTPROT_XORP", + "RTPROT_ZEBRA", + "RTV_EXPIRE", + "RTV_HOPCOUNT", + "RTV_MTU", + "RTV_RPIPE", + "RTV_RTT", + 
"RTV_RTTVAR", + "RTV_SPIPE", + "RTV_SSTHRESH", + "RTV_WEIGHT", + "RT_CACHING_CONTEXT", + "RT_CLASS_DEFAULT", + "RT_CLASS_LOCAL", + "RT_CLASS_MAIN", + "RT_CLASS_MAX", + "RT_CLASS_UNSPEC", + "RT_DEFAULT_FIB", + "RT_NORTREF", + "RT_SCOPE_HOST", + "RT_SCOPE_LINK", + "RT_SCOPE_NOWHERE", + "RT_SCOPE_SITE", + "RT_SCOPE_UNIVERSE", + "RT_TABLEID_MAX", + "RT_TABLE_COMPAT", + "RT_TABLE_DEFAULT", + "RT_TABLE_LOCAL", + "RT_TABLE_MAIN", + "RT_TABLE_MAX", + "RT_TABLE_UNSPEC", + "RUSAGE_CHILDREN", + "RUSAGE_SELF", + "RUSAGE_THREAD", + "Radvisory_t", + "RawConn", + "RawSockaddr", + "RawSockaddrAny", + "RawSockaddrDatalink", + "RawSockaddrInet4", + "RawSockaddrInet6", + "RawSockaddrLinklayer", + "RawSockaddrNetlink", + "RawSockaddrUnix", + "RawSyscall", + "RawSyscall6", + "Read", + "ReadConsole", + "ReadDirectoryChanges", + "ReadDirent", + "ReadFile", + "Readlink", + "Reboot", + "Recvfrom", + "Recvmsg", + "RegCloseKey", + "RegEnumKeyEx", + "RegOpenKeyEx", + "RegQueryInfoKey", + "RegQueryValueEx", + "RemoveDirectory", + "Removexattr", + "Rename", + "Renameat", + "Revoke", + "Rlimit", + "Rmdir", + "RouteMessage", + "RouteRIB", + "RoutingMessage", + "RtAttr", + "RtGenmsg", + "RtMetrics", + "RtMsg", + "RtMsghdr", + "RtNexthop", + "Rusage", + "SCM_BINTIME", + "SCM_CREDENTIALS", + "SCM_CREDS", + "SCM_RIGHTS", + "SCM_TIMESTAMP", + "SCM_TIMESTAMPING", + "SCM_TIMESTAMPNS", + "SCM_TIMESTAMP_MONOTONIC", + "SHUT_RD", + "SHUT_RDWR", + "SHUT_WR", + "SID", + "SIDAndAttributes", + "SIGABRT", + "SIGALRM", + "SIGBUS", + "SIGCHLD", + "SIGCLD", + "SIGCONT", + "SIGEMT", + "SIGFPE", + "SIGHUP", + "SIGILL", + "SIGINFO", + "SIGINT", + "SIGIO", + "SIGIOT", + "SIGKILL", + "SIGLIBRT", + "SIGLWP", + "SIGPIPE", + "SIGPOLL", + "SIGPROF", + "SIGPWR", + "SIGQUIT", + "SIGSEGV", + "SIGSTKFLT", + "SIGSTOP", + "SIGSYS", + "SIGTERM", + "SIGTHR", + "SIGTRAP", + "SIGTSTP", + "SIGTTIN", + "SIGTTOU", + "SIGUNUSED", + "SIGURG", + "SIGUSR1", + "SIGUSR2", + "SIGVTALRM", + "SIGWINCH", + "SIGXCPU", + "SIGXFSZ", + "SIOCADDDLCI", + "SIOCADDMULTI", + "SIOCADDRT", + "SIOCAIFADDR", + "SIOCAIFGROUP", + "SIOCALIFADDR", + "SIOCARPIPLL", + "SIOCATMARK", + "SIOCAUTOADDR", + "SIOCAUTONETMASK", + "SIOCBRDGADD", + "SIOCBRDGADDS", + "SIOCBRDGARL", + "SIOCBRDGDADDR", + "SIOCBRDGDEL", + "SIOCBRDGDELS", + "SIOCBRDGFLUSH", + "SIOCBRDGFRL", + "SIOCBRDGGCACHE", + "SIOCBRDGGFD", + "SIOCBRDGGHT", + "SIOCBRDGGIFFLGS", + "SIOCBRDGGMA", + "SIOCBRDGGPARAM", + "SIOCBRDGGPRI", + "SIOCBRDGGRL", + "SIOCBRDGGSIFS", + "SIOCBRDGGTO", + "SIOCBRDGIFS", + "SIOCBRDGRTS", + "SIOCBRDGSADDR", + "SIOCBRDGSCACHE", + "SIOCBRDGSFD", + "SIOCBRDGSHT", + "SIOCBRDGSIFCOST", + "SIOCBRDGSIFFLGS", + "SIOCBRDGSIFPRIO", + "SIOCBRDGSMA", + "SIOCBRDGSPRI", + "SIOCBRDGSPROTO", + "SIOCBRDGSTO", + "SIOCBRDGSTXHC", + "SIOCDARP", + "SIOCDELDLCI", + "SIOCDELMULTI", + "SIOCDELRT", + "SIOCDEVPRIVATE", + "SIOCDIFADDR", + "SIOCDIFGROUP", + "SIOCDIFPHYADDR", + "SIOCDLIFADDR", + "SIOCDRARP", + "SIOCGARP", + "SIOCGDRVSPEC", + "SIOCGETKALIVE", + "SIOCGETLABEL", + "SIOCGETPFLOW", + "SIOCGETPFSYNC", + "SIOCGETSGCNT", + "SIOCGETVIFCNT", + "SIOCGETVLAN", + "SIOCGHIWAT", + "SIOCGIFADDR", + "SIOCGIFADDRPREF", + "SIOCGIFALIAS", + "SIOCGIFALTMTU", + "SIOCGIFASYNCMAP", + "SIOCGIFBOND", + "SIOCGIFBR", + "SIOCGIFBRDADDR", + "SIOCGIFCAP", + "SIOCGIFCONF", + "SIOCGIFCOUNT", + "SIOCGIFDATA", + "SIOCGIFDESCR", + "SIOCGIFDEVMTU", + "SIOCGIFDLT", + "SIOCGIFDSTADDR", + "SIOCGIFENCAP", + "SIOCGIFFIB", + "SIOCGIFFLAGS", + "SIOCGIFGATTR", + "SIOCGIFGENERIC", + "SIOCGIFGMEMB", + "SIOCGIFGROUP", + "SIOCGIFHARDMTU", + "SIOCGIFHWADDR", + 
"SIOCGIFINDEX", + "SIOCGIFKPI", + "SIOCGIFMAC", + "SIOCGIFMAP", + "SIOCGIFMEDIA", + "SIOCGIFMEM", + "SIOCGIFMETRIC", + "SIOCGIFMTU", + "SIOCGIFNAME", + "SIOCGIFNETMASK", + "SIOCGIFPDSTADDR", + "SIOCGIFPFLAGS", + "SIOCGIFPHYS", + "SIOCGIFPRIORITY", + "SIOCGIFPSRCADDR", + "SIOCGIFRDOMAIN", + "SIOCGIFRTLABEL", + "SIOCGIFSLAVE", + "SIOCGIFSTATUS", + "SIOCGIFTIMESLOT", + "SIOCGIFTXQLEN", + "SIOCGIFVLAN", + "SIOCGIFWAKEFLAGS", + "SIOCGIFXFLAGS", + "SIOCGLIFADDR", + "SIOCGLIFPHYADDR", + "SIOCGLIFPHYRTABLE", + "SIOCGLIFPHYTTL", + "SIOCGLINKSTR", + "SIOCGLOWAT", + "SIOCGPGRP", + "SIOCGPRIVATE_0", + "SIOCGPRIVATE_1", + "SIOCGRARP", + "SIOCGSPPPPARAMS", + "SIOCGSTAMP", + "SIOCGSTAMPNS", + "SIOCGVH", + "SIOCGVNETID", + "SIOCIFCREATE", + "SIOCIFCREATE2", + "SIOCIFDESTROY", + "SIOCIFGCLONERS", + "SIOCINITIFADDR", + "SIOCPROTOPRIVATE", + "SIOCRSLVMULTI", + "SIOCRTMSG", + "SIOCSARP", + "SIOCSDRVSPEC", + "SIOCSETKALIVE", + "SIOCSETLABEL", + "SIOCSETPFLOW", + "SIOCSETPFSYNC", + "SIOCSETVLAN", + "SIOCSHIWAT", + "SIOCSIFADDR", + "SIOCSIFADDRPREF", + "SIOCSIFALTMTU", + "SIOCSIFASYNCMAP", + "SIOCSIFBOND", + "SIOCSIFBR", + "SIOCSIFBRDADDR", + "SIOCSIFCAP", + "SIOCSIFDESCR", + "SIOCSIFDSTADDR", + "SIOCSIFENCAP", + "SIOCSIFFIB", + "SIOCSIFFLAGS", + "SIOCSIFGATTR", + "SIOCSIFGENERIC", + "SIOCSIFHWADDR", + "SIOCSIFHWBROADCAST", + "SIOCSIFKPI", + "SIOCSIFLINK", + "SIOCSIFLLADDR", + "SIOCSIFMAC", + "SIOCSIFMAP", + "SIOCSIFMEDIA", + "SIOCSIFMEM", + "SIOCSIFMETRIC", + "SIOCSIFMTU", + "SIOCSIFNAME", + "SIOCSIFNETMASK", + "SIOCSIFPFLAGS", + "SIOCSIFPHYADDR", + "SIOCSIFPHYS", + "SIOCSIFPRIORITY", + "SIOCSIFRDOMAIN", + "SIOCSIFRTLABEL", + "SIOCSIFRVNET", + "SIOCSIFSLAVE", + "SIOCSIFTIMESLOT", + "SIOCSIFTXQLEN", + "SIOCSIFVLAN", + "SIOCSIFVNET", + "SIOCSIFXFLAGS", + "SIOCSLIFPHYADDR", + "SIOCSLIFPHYRTABLE", + "SIOCSLIFPHYTTL", + "SIOCSLINKSTR", + "SIOCSLOWAT", + "SIOCSPGRP", + "SIOCSRARP", + "SIOCSSPPPPARAMS", + "SIOCSVH", + "SIOCSVNETID", + "SIOCZIFDATA", + "SIO_GET_EXTENSION_FUNCTION_POINTER", + "SIO_GET_INTERFACE_LIST", + "SIO_KEEPALIVE_VALS", + "SIO_UDP_CONNRESET", + "SOCK_CLOEXEC", + "SOCK_DCCP", + "SOCK_DGRAM", + "SOCK_FLAGS_MASK", + "SOCK_MAXADDRLEN", + "SOCK_NONBLOCK", + "SOCK_NOSIGPIPE", + "SOCK_PACKET", + "SOCK_RAW", + "SOCK_RDM", + "SOCK_SEQPACKET", + "SOCK_STREAM", + "SOL_AAL", + "SOL_ATM", + "SOL_DECNET", + "SOL_ICMPV6", + "SOL_IP", + "SOL_IPV6", + "SOL_IRDA", + "SOL_PACKET", + "SOL_RAW", + "SOL_SOCKET", + "SOL_TCP", + "SOL_X25", + "SOMAXCONN", + "SO_ACCEPTCONN", + "SO_ACCEPTFILTER", + "SO_ATTACH_FILTER", + "SO_BINDANY", + "SO_BINDTODEVICE", + "SO_BINTIME", + "SO_BROADCAST", + "SO_BSDCOMPAT", + "SO_DEBUG", + "SO_DETACH_FILTER", + "SO_DOMAIN", + "SO_DONTROUTE", + "SO_DONTTRUNC", + "SO_ERROR", + "SO_KEEPALIVE", + "SO_LABEL", + "SO_LINGER", + "SO_LINGER_SEC", + "SO_LISTENINCQLEN", + "SO_LISTENQLEN", + "SO_LISTENQLIMIT", + "SO_MARK", + "SO_NETPROC", + "SO_NKE", + "SO_NOADDRERR", + "SO_NOHEADER", + "SO_NOSIGPIPE", + "SO_NOTIFYCONFLICT", + "SO_NO_CHECK", + "SO_NO_DDP", + "SO_NO_OFFLOAD", + "SO_NP_EXTENSIONS", + "SO_NREAD", + "SO_NWRITE", + "SO_OOBINLINE", + "SO_OVERFLOWED", + "SO_PASSCRED", + "SO_PASSSEC", + "SO_PEERCRED", + "SO_PEERLABEL", + "SO_PEERNAME", + "SO_PEERSEC", + "SO_PRIORITY", + "SO_PROTOCOL", + "SO_PROTOTYPE", + "SO_RANDOMPORT", + "SO_RCVBUF", + "SO_RCVBUFFORCE", + "SO_RCVLOWAT", + "SO_RCVTIMEO", + "SO_RESTRICTIONS", + "SO_RESTRICT_DENYIN", + "SO_RESTRICT_DENYOUT", + "SO_RESTRICT_DENYSET", + "SO_REUSEADDR", + "SO_REUSEPORT", + "SO_REUSESHAREUID", + "SO_RTABLE", + "SO_RXQ_OVFL", + 
"SO_SECURITY_AUTHENTICATION", + "SO_SECURITY_ENCRYPTION_NETWORK", + "SO_SECURITY_ENCRYPTION_TRANSPORT", + "SO_SETFIB", + "SO_SNDBUF", + "SO_SNDBUFFORCE", + "SO_SNDLOWAT", + "SO_SNDTIMEO", + "SO_SPLICE", + "SO_TIMESTAMP", + "SO_TIMESTAMPING", + "SO_TIMESTAMPNS", + "SO_TIMESTAMP_MONOTONIC", + "SO_TYPE", + "SO_UPCALLCLOSEWAIT", + "SO_UPDATE_ACCEPT_CONTEXT", + "SO_UPDATE_CONNECT_CONTEXT", + "SO_USELOOPBACK", + "SO_USER_COOKIE", + "SO_VENDOR", + "SO_WANTMORE", + "SO_WANTOOBFLAG", + "SSLExtraCertChainPolicyPara", + "STANDARD_RIGHTS_ALL", + "STANDARD_RIGHTS_EXECUTE", + "STANDARD_RIGHTS_READ", + "STANDARD_RIGHTS_REQUIRED", + "STANDARD_RIGHTS_WRITE", + "STARTF_USESHOWWINDOW", + "STARTF_USESTDHANDLES", + "STD_ERROR_HANDLE", + "STD_INPUT_HANDLE", + "STD_OUTPUT_HANDLE", + "SUBLANG_ENGLISH_US", + "SW_FORCEMINIMIZE", + "SW_HIDE", + "SW_MAXIMIZE", + "SW_MINIMIZE", + "SW_NORMAL", + "SW_RESTORE", + "SW_SHOW", + "SW_SHOWDEFAULT", + "SW_SHOWMAXIMIZED", + "SW_SHOWMINIMIZED", + "SW_SHOWMINNOACTIVE", + "SW_SHOWNA", + "SW_SHOWNOACTIVATE", + "SW_SHOWNORMAL", + "SYMBOLIC_LINK_FLAG_DIRECTORY", + "SYNCHRONIZE", + "SYSCTL_VERSION", + "SYSCTL_VERS_0", + "SYSCTL_VERS_1", + "SYSCTL_VERS_MASK", + "SYS_ABORT2", + "SYS_ACCEPT", + "SYS_ACCEPT4", + "SYS_ACCEPT_NOCANCEL", + "SYS_ACCESS", + "SYS_ACCESS_EXTENDED", + "SYS_ACCT", + "SYS_ADD_KEY", + "SYS_ADD_PROFIL", + "SYS_ADJFREQ", + "SYS_ADJTIME", + "SYS_ADJTIMEX", + "SYS_AFS_SYSCALL", + "SYS_AIO_CANCEL", + "SYS_AIO_ERROR", + "SYS_AIO_FSYNC", + "SYS_AIO_READ", + "SYS_AIO_RETURN", + "SYS_AIO_SUSPEND", + "SYS_AIO_SUSPEND_NOCANCEL", + "SYS_AIO_WRITE", + "SYS_ALARM", + "SYS_ARCH_PRCTL", + "SYS_ARM_FADVISE64_64", + "SYS_ARM_SYNC_FILE_RANGE", + "SYS_ATGETMSG", + "SYS_ATPGETREQ", + "SYS_ATPGETRSP", + "SYS_ATPSNDREQ", + "SYS_ATPSNDRSP", + "SYS_ATPUTMSG", + "SYS_ATSOCKET", + "SYS_AUDIT", + "SYS_AUDITCTL", + "SYS_AUDITON", + "SYS_AUDIT_SESSION_JOIN", + "SYS_AUDIT_SESSION_PORT", + "SYS_AUDIT_SESSION_SELF", + "SYS_BDFLUSH", + "SYS_BIND", + "SYS_BINDAT", + "SYS_BREAK", + "SYS_BRK", + "SYS_BSDTHREAD_CREATE", + "SYS_BSDTHREAD_REGISTER", + "SYS_BSDTHREAD_TERMINATE", + "SYS_CAPGET", + "SYS_CAPSET", + "SYS_CAP_ENTER", + "SYS_CAP_FCNTLS_GET", + "SYS_CAP_FCNTLS_LIMIT", + "SYS_CAP_GETMODE", + "SYS_CAP_GETRIGHTS", + "SYS_CAP_IOCTLS_GET", + "SYS_CAP_IOCTLS_LIMIT", + "SYS_CAP_NEW", + "SYS_CAP_RIGHTS_GET", + "SYS_CAP_RIGHTS_LIMIT", + "SYS_CHDIR", + "SYS_CHFLAGS", + "SYS_CHFLAGSAT", + "SYS_CHMOD", + "SYS_CHMOD_EXTENDED", + "SYS_CHOWN", + "SYS_CHOWN32", + "SYS_CHROOT", + "SYS_CHUD", + "SYS_CLOCK_ADJTIME", + "SYS_CLOCK_GETCPUCLOCKID2", + "SYS_CLOCK_GETRES", + "SYS_CLOCK_GETTIME", + "SYS_CLOCK_NANOSLEEP", + "SYS_CLOCK_SETTIME", + "SYS_CLONE", + "SYS_CLOSE", + "SYS_CLOSEFROM", + "SYS_CLOSE_NOCANCEL", + "SYS_CONNECT", + "SYS_CONNECTAT", + "SYS_CONNECT_NOCANCEL", + "SYS_COPYFILE", + "SYS_CPUSET", + "SYS_CPUSET_GETAFFINITY", + "SYS_CPUSET_GETID", + "SYS_CPUSET_SETAFFINITY", + "SYS_CPUSET_SETID", + "SYS_CREAT", + "SYS_CREATE_MODULE", + "SYS_CSOPS", + "SYS_DELETE", + "SYS_DELETE_MODULE", + "SYS_DUP", + "SYS_DUP2", + "SYS_DUP3", + "SYS_EACCESS", + "SYS_EPOLL_CREATE", + "SYS_EPOLL_CREATE1", + "SYS_EPOLL_CTL", + "SYS_EPOLL_CTL_OLD", + "SYS_EPOLL_PWAIT", + "SYS_EPOLL_WAIT", + "SYS_EPOLL_WAIT_OLD", + "SYS_EVENTFD", + "SYS_EVENTFD2", + "SYS_EXCHANGEDATA", + "SYS_EXECVE", + "SYS_EXIT", + "SYS_EXIT_GROUP", + "SYS_EXTATTRCTL", + "SYS_EXTATTR_DELETE_FD", + "SYS_EXTATTR_DELETE_FILE", + "SYS_EXTATTR_DELETE_LINK", + "SYS_EXTATTR_GET_FD", + "SYS_EXTATTR_GET_FILE", + "SYS_EXTATTR_GET_LINK", + "SYS_EXTATTR_LIST_FD", + 
"SYS_EXTATTR_LIST_FILE", + "SYS_EXTATTR_LIST_LINK", + "SYS_EXTATTR_SET_FD", + "SYS_EXTATTR_SET_FILE", + "SYS_EXTATTR_SET_LINK", + "SYS_FACCESSAT", + "SYS_FADVISE64", + "SYS_FADVISE64_64", + "SYS_FALLOCATE", + "SYS_FANOTIFY_INIT", + "SYS_FANOTIFY_MARK", + "SYS_FCHDIR", + "SYS_FCHFLAGS", + "SYS_FCHMOD", + "SYS_FCHMODAT", + "SYS_FCHMOD_EXTENDED", + "SYS_FCHOWN", + "SYS_FCHOWN32", + "SYS_FCHOWNAT", + "SYS_FCHROOT", + "SYS_FCNTL", + "SYS_FCNTL64", + "SYS_FCNTL_NOCANCEL", + "SYS_FDATASYNC", + "SYS_FEXECVE", + "SYS_FFCLOCK_GETCOUNTER", + "SYS_FFCLOCK_GETESTIMATE", + "SYS_FFCLOCK_SETESTIMATE", + "SYS_FFSCTL", + "SYS_FGETATTRLIST", + "SYS_FGETXATTR", + "SYS_FHOPEN", + "SYS_FHSTAT", + "SYS_FHSTATFS", + "SYS_FILEPORT_MAKEFD", + "SYS_FILEPORT_MAKEPORT", + "SYS_FKTRACE", + "SYS_FLISTXATTR", + "SYS_FLOCK", + "SYS_FORK", + "SYS_FPATHCONF", + "SYS_FREEBSD6_FTRUNCATE", + "SYS_FREEBSD6_LSEEK", + "SYS_FREEBSD6_MMAP", + "SYS_FREEBSD6_PREAD", + "SYS_FREEBSD6_PWRITE", + "SYS_FREEBSD6_TRUNCATE", + "SYS_FREMOVEXATTR", + "SYS_FSCTL", + "SYS_FSETATTRLIST", + "SYS_FSETXATTR", + "SYS_FSGETPATH", + "SYS_FSTAT", + "SYS_FSTAT64", + "SYS_FSTAT64_EXTENDED", + "SYS_FSTATAT", + "SYS_FSTATAT64", + "SYS_FSTATFS", + "SYS_FSTATFS64", + "SYS_FSTATV", + "SYS_FSTATVFS1", + "SYS_FSTAT_EXTENDED", + "SYS_FSYNC", + "SYS_FSYNC_NOCANCEL", + "SYS_FSYNC_RANGE", + "SYS_FTIME", + "SYS_FTRUNCATE", + "SYS_FTRUNCATE64", + "SYS_FUTEX", + "SYS_FUTIMENS", + "SYS_FUTIMES", + "SYS_FUTIMESAT", + "SYS_GETATTRLIST", + "SYS_GETAUDIT", + "SYS_GETAUDIT_ADDR", + "SYS_GETAUID", + "SYS_GETCONTEXT", + "SYS_GETCPU", + "SYS_GETCWD", + "SYS_GETDENTS", + "SYS_GETDENTS64", + "SYS_GETDIRENTRIES", + "SYS_GETDIRENTRIES64", + "SYS_GETDIRENTRIESATTR", + "SYS_GETDTABLECOUNT", + "SYS_GETDTABLESIZE", + "SYS_GETEGID", + "SYS_GETEGID32", + "SYS_GETEUID", + "SYS_GETEUID32", + "SYS_GETFH", + "SYS_GETFSSTAT", + "SYS_GETFSSTAT64", + "SYS_GETGID", + "SYS_GETGID32", + "SYS_GETGROUPS", + "SYS_GETGROUPS32", + "SYS_GETHOSTUUID", + "SYS_GETITIMER", + "SYS_GETLCID", + "SYS_GETLOGIN", + "SYS_GETLOGINCLASS", + "SYS_GETPEERNAME", + "SYS_GETPGID", + "SYS_GETPGRP", + "SYS_GETPID", + "SYS_GETPMSG", + "SYS_GETPPID", + "SYS_GETPRIORITY", + "SYS_GETRESGID", + "SYS_GETRESGID32", + "SYS_GETRESUID", + "SYS_GETRESUID32", + "SYS_GETRLIMIT", + "SYS_GETRTABLE", + "SYS_GETRUSAGE", + "SYS_GETSGROUPS", + "SYS_GETSID", + "SYS_GETSOCKNAME", + "SYS_GETSOCKOPT", + "SYS_GETTHRID", + "SYS_GETTID", + "SYS_GETTIMEOFDAY", + "SYS_GETUID", + "SYS_GETUID32", + "SYS_GETVFSSTAT", + "SYS_GETWGROUPS", + "SYS_GETXATTR", + "SYS_GET_KERNEL_SYMS", + "SYS_GET_MEMPOLICY", + "SYS_GET_ROBUST_LIST", + "SYS_GET_THREAD_AREA", + "SYS_GTTY", + "SYS_IDENTITYSVC", + "SYS_IDLE", + "SYS_INITGROUPS", + "SYS_INIT_MODULE", + "SYS_INOTIFY_ADD_WATCH", + "SYS_INOTIFY_INIT", + "SYS_INOTIFY_INIT1", + "SYS_INOTIFY_RM_WATCH", + "SYS_IOCTL", + "SYS_IOPERM", + "SYS_IOPL", + "SYS_IOPOLICYSYS", + "SYS_IOPRIO_GET", + "SYS_IOPRIO_SET", + "SYS_IO_CANCEL", + "SYS_IO_DESTROY", + "SYS_IO_GETEVENTS", + "SYS_IO_SETUP", + "SYS_IO_SUBMIT", + "SYS_IPC", + "SYS_ISSETUGID", + "SYS_JAIL", + "SYS_JAIL_ATTACH", + "SYS_JAIL_GET", + "SYS_JAIL_REMOVE", + "SYS_JAIL_SET", + "SYS_KDEBUG_TRACE", + "SYS_KENV", + "SYS_KEVENT", + "SYS_KEVENT64", + "SYS_KEXEC_LOAD", + "SYS_KEYCTL", + "SYS_KILL", + "SYS_KLDFIND", + "SYS_KLDFIRSTMOD", + "SYS_KLDLOAD", + "SYS_KLDNEXT", + "SYS_KLDSTAT", + "SYS_KLDSYM", + "SYS_KLDUNLOAD", + "SYS_KLDUNLOADF", + "SYS_KQUEUE", + "SYS_KQUEUE1", + "SYS_KTIMER_CREATE", + "SYS_KTIMER_DELETE", + "SYS_KTIMER_GETOVERRUN", + "SYS_KTIMER_GETTIME", + 
"SYS_KTIMER_SETTIME", + "SYS_KTRACE", + "SYS_LCHFLAGS", + "SYS_LCHMOD", + "SYS_LCHOWN", + "SYS_LCHOWN32", + "SYS_LGETFH", + "SYS_LGETXATTR", + "SYS_LINK", + "SYS_LINKAT", + "SYS_LIO_LISTIO", + "SYS_LISTEN", + "SYS_LISTXATTR", + "SYS_LLISTXATTR", + "SYS_LOCK", + "SYS_LOOKUP_DCOOKIE", + "SYS_LPATHCONF", + "SYS_LREMOVEXATTR", + "SYS_LSEEK", + "SYS_LSETXATTR", + "SYS_LSTAT", + "SYS_LSTAT64", + "SYS_LSTAT64_EXTENDED", + "SYS_LSTATV", + "SYS_LSTAT_EXTENDED", + "SYS_LUTIMES", + "SYS_MAC_SYSCALL", + "SYS_MADVISE", + "SYS_MADVISE1", + "SYS_MAXSYSCALL", + "SYS_MBIND", + "SYS_MIGRATE_PAGES", + "SYS_MINCORE", + "SYS_MINHERIT", + "SYS_MKCOMPLEX", + "SYS_MKDIR", + "SYS_MKDIRAT", + "SYS_MKDIR_EXTENDED", + "SYS_MKFIFO", + "SYS_MKFIFOAT", + "SYS_MKFIFO_EXTENDED", + "SYS_MKNOD", + "SYS_MKNODAT", + "SYS_MLOCK", + "SYS_MLOCKALL", + "SYS_MMAP", + "SYS_MMAP2", + "SYS_MODCTL", + "SYS_MODFIND", + "SYS_MODFNEXT", + "SYS_MODIFY_LDT", + "SYS_MODNEXT", + "SYS_MODSTAT", + "SYS_MODWATCH", + "SYS_MOUNT", + "SYS_MOVE_PAGES", + "SYS_MPROTECT", + "SYS_MPX", + "SYS_MQUERY", + "SYS_MQ_GETSETATTR", + "SYS_MQ_NOTIFY", + "SYS_MQ_OPEN", + "SYS_MQ_TIMEDRECEIVE", + "SYS_MQ_TIMEDSEND", + "SYS_MQ_UNLINK", + "SYS_MREMAP", + "SYS_MSGCTL", + "SYS_MSGGET", + "SYS_MSGRCV", + "SYS_MSGRCV_NOCANCEL", + "SYS_MSGSND", + "SYS_MSGSND_NOCANCEL", + "SYS_MSGSYS", + "SYS_MSYNC", + "SYS_MSYNC_NOCANCEL", + "SYS_MUNLOCK", + "SYS_MUNLOCKALL", + "SYS_MUNMAP", + "SYS_NAME_TO_HANDLE_AT", + "SYS_NANOSLEEP", + "SYS_NEWFSTATAT", + "SYS_NFSCLNT", + "SYS_NFSSERVCTL", + "SYS_NFSSVC", + "SYS_NFSTAT", + "SYS_NICE", + "SYS_NLSTAT", + "SYS_NMOUNT", + "SYS_NSTAT", + "SYS_NTP_ADJTIME", + "SYS_NTP_GETTIME", + "SYS_OABI_SYSCALL_BASE", + "SYS_OBREAK", + "SYS_OLDFSTAT", + "SYS_OLDLSTAT", + "SYS_OLDOLDUNAME", + "SYS_OLDSTAT", + "SYS_OLDUNAME", + "SYS_OPEN", + "SYS_OPENAT", + "SYS_OPENBSD_POLL", + "SYS_OPEN_BY_HANDLE_AT", + "SYS_OPEN_EXTENDED", + "SYS_OPEN_NOCANCEL", + "SYS_OVADVISE", + "SYS_PACCEPT", + "SYS_PATHCONF", + "SYS_PAUSE", + "SYS_PCICONFIG_IOBASE", + "SYS_PCICONFIG_READ", + "SYS_PCICONFIG_WRITE", + "SYS_PDFORK", + "SYS_PDGETPID", + "SYS_PDKILL", + "SYS_PERF_EVENT_OPEN", + "SYS_PERSONALITY", + "SYS_PID_HIBERNATE", + "SYS_PID_RESUME", + "SYS_PID_SHUTDOWN_SOCKETS", + "SYS_PID_SUSPEND", + "SYS_PIPE", + "SYS_PIPE2", + "SYS_PIVOT_ROOT", + "SYS_PMC_CONTROL", + "SYS_PMC_GET_INFO", + "SYS_POLL", + "SYS_POLLTS", + "SYS_POLL_NOCANCEL", + "SYS_POSIX_FADVISE", + "SYS_POSIX_FALLOCATE", + "SYS_POSIX_OPENPT", + "SYS_POSIX_SPAWN", + "SYS_PPOLL", + "SYS_PRCTL", + "SYS_PREAD", + "SYS_PREAD64", + "SYS_PREADV", + "SYS_PREAD_NOCANCEL", + "SYS_PRLIMIT64", + "SYS_PROCCTL", + "SYS_PROCESS_POLICY", + "SYS_PROCESS_VM_READV", + "SYS_PROCESS_VM_WRITEV", + "SYS_PROC_INFO", + "SYS_PROF", + "SYS_PROFIL", + "SYS_PSELECT", + "SYS_PSELECT6", + "SYS_PSET_ASSIGN", + "SYS_PSET_CREATE", + "SYS_PSET_DESTROY", + "SYS_PSYNCH_CVBROAD", + "SYS_PSYNCH_CVCLRPREPOST", + "SYS_PSYNCH_CVSIGNAL", + "SYS_PSYNCH_CVWAIT", + "SYS_PSYNCH_MUTEXDROP", + "SYS_PSYNCH_MUTEXWAIT", + "SYS_PSYNCH_RW_DOWNGRADE", + "SYS_PSYNCH_RW_LONGRDLOCK", + "SYS_PSYNCH_RW_RDLOCK", + "SYS_PSYNCH_RW_UNLOCK", + "SYS_PSYNCH_RW_UNLOCK2", + "SYS_PSYNCH_RW_UPGRADE", + "SYS_PSYNCH_RW_WRLOCK", + "SYS_PSYNCH_RW_YIELDWRLOCK", + "SYS_PTRACE", + "SYS_PUTPMSG", + "SYS_PWRITE", + "SYS_PWRITE64", + "SYS_PWRITEV", + "SYS_PWRITE_NOCANCEL", + "SYS_QUERY_MODULE", + "SYS_QUOTACTL", + "SYS_RASCTL", + "SYS_RCTL_ADD_RULE", + "SYS_RCTL_GET_LIMITS", + "SYS_RCTL_GET_RACCT", + "SYS_RCTL_GET_RULES", + "SYS_RCTL_REMOVE_RULE", + "SYS_READ", + "SYS_READAHEAD", + 
"SYS_READDIR", + "SYS_READLINK", + "SYS_READLINKAT", + "SYS_READV", + "SYS_READV_NOCANCEL", + "SYS_READ_NOCANCEL", + "SYS_REBOOT", + "SYS_RECV", + "SYS_RECVFROM", + "SYS_RECVFROM_NOCANCEL", + "SYS_RECVMMSG", + "SYS_RECVMSG", + "SYS_RECVMSG_NOCANCEL", + "SYS_REMAP_FILE_PAGES", + "SYS_REMOVEXATTR", + "SYS_RENAME", + "SYS_RENAMEAT", + "SYS_REQUEST_KEY", + "SYS_RESTART_SYSCALL", + "SYS_REVOKE", + "SYS_RFORK", + "SYS_RMDIR", + "SYS_RTPRIO", + "SYS_RTPRIO_THREAD", + "SYS_RT_SIGACTION", + "SYS_RT_SIGPENDING", + "SYS_RT_SIGPROCMASK", + "SYS_RT_SIGQUEUEINFO", + "SYS_RT_SIGRETURN", + "SYS_RT_SIGSUSPEND", + "SYS_RT_SIGTIMEDWAIT", + "SYS_RT_TGSIGQUEUEINFO", + "SYS_SBRK", + "SYS_SCHED_GETAFFINITY", + "SYS_SCHED_GETPARAM", + "SYS_SCHED_GETSCHEDULER", + "SYS_SCHED_GET_PRIORITY_MAX", + "SYS_SCHED_GET_PRIORITY_MIN", + "SYS_SCHED_RR_GET_INTERVAL", + "SYS_SCHED_SETAFFINITY", + "SYS_SCHED_SETPARAM", + "SYS_SCHED_SETSCHEDULER", + "SYS_SCHED_YIELD", + "SYS_SCTP_GENERIC_RECVMSG", + "SYS_SCTP_GENERIC_SENDMSG", + "SYS_SCTP_GENERIC_SENDMSG_IOV", + "SYS_SCTP_PEELOFF", + "SYS_SEARCHFS", + "SYS_SECURITY", + "SYS_SELECT", + "SYS_SELECT_NOCANCEL", + "SYS_SEMCONFIG", + "SYS_SEMCTL", + "SYS_SEMGET", + "SYS_SEMOP", + "SYS_SEMSYS", + "SYS_SEMTIMEDOP", + "SYS_SEM_CLOSE", + "SYS_SEM_DESTROY", + "SYS_SEM_GETVALUE", + "SYS_SEM_INIT", + "SYS_SEM_OPEN", + "SYS_SEM_POST", + "SYS_SEM_TRYWAIT", + "SYS_SEM_UNLINK", + "SYS_SEM_WAIT", + "SYS_SEM_WAIT_NOCANCEL", + "SYS_SEND", + "SYS_SENDFILE", + "SYS_SENDFILE64", + "SYS_SENDMMSG", + "SYS_SENDMSG", + "SYS_SENDMSG_NOCANCEL", + "SYS_SENDTO", + "SYS_SENDTO_NOCANCEL", + "SYS_SETATTRLIST", + "SYS_SETAUDIT", + "SYS_SETAUDIT_ADDR", + "SYS_SETAUID", + "SYS_SETCONTEXT", + "SYS_SETDOMAINNAME", + "SYS_SETEGID", + "SYS_SETEUID", + "SYS_SETFIB", + "SYS_SETFSGID", + "SYS_SETFSGID32", + "SYS_SETFSUID", + "SYS_SETFSUID32", + "SYS_SETGID", + "SYS_SETGID32", + "SYS_SETGROUPS", + "SYS_SETGROUPS32", + "SYS_SETHOSTNAME", + "SYS_SETITIMER", + "SYS_SETLCID", + "SYS_SETLOGIN", + "SYS_SETLOGINCLASS", + "SYS_SETNS", + "SYS_SETPGID", + "SYS_SETPRIORITY", + "SYS_SETPRIVEXEC", + "SYS_SETREGID", + "SYS_SETREGID32", + "SYS_SETRESGID", + "SYS_SETRESGID32", + "SYS_SETRESUID", + "SYS_SETRESUID32", + "SYS_SETREUID", + "SYS_SETREUID32", + "SYS_SETRLIMIT", + "SYS_SETRTABLE", + "SYS_SETSGROUPS", + "SYS_SETSID", + "SYS_SETSOCKOPT", + "SYS_SETTID", + "SYS_SETTID_WITH_PID", + "SYS_SETTIMEOFDAY", + "SYS_SETUID", + "SYS_SETUID32", + "SYS_SETWGROUPS", + "SYS_SETXATTR", + "SYS_SET_MEMPOLICY", + "SYS_SET_ROBUST_LIST", + "SYS_SET_THREAD_AREA", + "SYS_SET_TID_ADDRESS", + "SYS_SGETMASK", + "SYS_SHARED_REGION_CHECK_NP", + "SYS_SHARED_REGION_MAP_AND_SLIDE_NP", + "SYS_SHMAT", + "SYS_SHMCTL", + "SYS_SHMDT", + "SYS_SHMGET", + "SYS_SHMSYS", + "SYS_SHM_OPEN", + "SYS_SHM_UNLINK", + "SYS_SHUTDOWN", + "SYS_SIGACTION", + "SYS_SIGALTSTACK", + "SYS_SIGNAL", + "SYS_SIGNALFD", + "SYS_SIGNALFD4", + "SYS_SIGPENDING", + "SYS_SIGPROCMASK", + "SYS_SIGQUEUE", + "SYS_SIGQUEUEINFO", + "SYS_SIGRETURN", + "SYS_SIGSUSPEND", + "SYS_SIGSUSPEND_NOCANCEL", + "SYS_SIGTIMEDWAIT", + "SYS_SIGWAIT", + "SYS_SIGWAITINFO", + "SYS_SOCKET", + "SYS_SOCKETCALL", + "SYS_SOCKETPAIR", + "SYS_SPLICE", + "SYS_SSETMASK", + "SYS_SSTK", + "SYS_STACK_SNAPSHOT", + "SYS_STAT", + "SYS_STAT64", + "SYS_STAT64_EXTENDED", + "SYS_STATFS", + "SYS_STATFS64", + "SYS_STATV", + "SYS_STATVFS1", + "SYS_STAT_EXTENDED", + "SYS_STIME", + "SYS_STTY", + "SYS_SWAPCONTEXT", + "SYS_SWAPCTL", + "SYS_SWAPOFF", + "SYS_SWAPON", + "SYS_SYMLINK", + "SYS_SYMLINKAT", + "SYS_SYNC", + "SYS_SYNCFS", + 
"SYS_SYNC_FILE_RANGE", + "SYS_SYSARCH", + "SYS_SYSCALL", + "SYS_SYSCALL_BASE", + "SYS_SYSFS", + "SYS_SYSINFO", + "SYS_SYSLOG", + "SYS_TEE", + "SYS_TGKILL", + "SYS_THREAD_SELFID", + "SYS_THR_CREATE", + "SYS_THR_EXIT", + "SYS_THR_KILL", + "SYS_THR_KILL2", + "SYS_THR_NEW", + "SYS_THR_SELF", + "SYS_THR_SET_NAME", + "SYS_THR_SUSPEND", + "SYS_THR_WAKE", + "SYS_TIME", + "SYS_TIMERFD_CREATE", + "SYS_TIMERFD_GETTIME", + "SYS_TIMERFD_SETTIME", + "SYS_TIMER_CREATE", + "SYS_TIMER_DELETE", + "SYS_TIMER_GETOVERRUN", + "SYS_TIMER_GETTIME", + "SYS_TIMER_SETTIME", + "SYS_TIMES", + "SYS_TKILL", + "SYS_TRUNCATE", + "SYS_TRUNCATE64", + "SYS_TUXCALL", + "SYS_UGETRLIMIT", + "SYS_ULIMIT", + "SYS_UMASK", + "SYS_UMASK_EXTENDED", + "SYS_UMOUNT", + "SYS_UMOUNT2", + "SYS_UNAME", + "SYS_UNDELETE", + "SYS_UNLINK", + "SYS_UNLINKAT", + "SYS_UNMOUNT", + "SYS_UNSHARE", + "SYS_USELIB", + "SYS_USTAT", + "SYS_UTIME", + "SYS_UTIMENSAT", + "SYS_UTIMES", + "SYS_UTRACE", + "SYS_UUIDGEN", + "SYS_VADVISE", + "SYS_VFORK", + "SYS_VHANGUP", + "SYS_VM86", + "SYS_VM86OLD", + "SYS_VMSPLICE", + "SYS_VM_PRESSURE_MONITOR", + "SYS_VSERVER", + "SYS_WAIT4", + "SYS_WAIT4_NOCANCEL", + "SYS_WAIT6", + "SYS_WAITEVENT", + "SYS_WAITID", + "SYS_WAITID_NOCANCEL", + "SYS_WAITPID", + "SYS_WATCHEVENT", + "SYS_WORKQ_KERNRETURN", + "SYS_WORKQ_OPEN", + "SYS_WRITE", + "SYS_WRITEV", + "SYS_WRITEV_NOCANCEL", + "SYS_WRITE_NOCANCEL", + "SYS_YIELD", + "SYS__LLSEEK", + "SYS__LWP_CONTINUE", + "SYS__LWP_CREATE", + "SYS__LWP_CTL", + "SYS__LWP_DETACH", + "SYS__LWP_EXIT", + "SYS__LWP_GETNAME", + "SYS__LWP_GETPRIVATE", + "SYS__LWP_KILL", + "SYS__LWP_PARK", + "SYS__LWP_SELF", + "SYS__LWP_SETNAME", + "SYS__LWP_SETPRIVATE", + "SYS__LWP_SUSPEND", + "SYS__LWP_UNPARK", + "SYS__LWP_UNPARK_ALL", + "SYS__LWP_WAIT", + "SYS__LWP_WAKEUP", + "SYS__NEWSELECT", + "SYS__PSET_BIND", + "SYS__SCHED_GETAFFINITY", + "SYS__SCHED_GETPARAM", + "SYS__SCHED_SETAFFINITY", + "SYS__SCHED_SETPARAM", + "SYS__SYSCTL", + "SYS__UMTX_LOCK", + "SYS__UMTX_OP", + "SYS__UMTX_UNLOCK", + "SYS___ACL_ACLCHECK_FD", + "SYS___ACL_ACLCHECK_FILE", + "SYS___ACL_ACLCHECK_LINK", + "SYS___ACL_DELETE_FD", + "SYS___ACL_DELETE_FILE", + "SYS___ACL_DELETE_LINK", + "SYS___ACL_GET_FD", + "SYS___ACL_GET_FILE", + "SYS___ACL_GET_LINK", + "SYS___ACL_SET_FD", + "SYS___ACL_SET_FILE", + "SYS___ACL_SET_LINK", + "SYS___CLONE", + "SYS___DISABLE_THREADSIGNAL", + "SYS___GETCWD", + "SYS___GETLOGIN", + "SYS___GET_TCB", + "SYS___MAC_EXECVE", + "SYS___MAC_GETFSSTAT", + "SYS___MAC_GET_FD", + "SYS___MAC_GET_FILE", + "SYS___MAC_GET_LCID", + "SYS___MAC_GET_LCTX", + "SYS___MAC_GET_LINK", + "SYS___MAC_GET_MOUNT", + "SYS___MAC_GET_PID", + "SYS___MAC_GET_PROC", + "SYS___MAC_MOUNT", + "SYS___MAC_SET_FD", + "SYS___MAC_SET_FILE", + "SYS___MAC_SET_LCTX", + "SYS___MAC_SET_LINK", + "SYS___MAC_SET_PROC", + "SYS___MAC_SYSCALL", + "SYS___OLD_SEMWAIT_SIGNAL", + "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", + "SYS___POSIX_CHOWN", + "SYS___POSIX_FCHOWN", + "SYS___POSIX_LCHOWN", + "SYS___POSIX_RENAME", + "SYS___PTHREAD_CANCELED", + "SYS___PTHREAD_CHDIR", + "SYS___PTHREAD_FCHDIR", + "SYS___PTHREAD_KILL", + "SYS___PTHREAD_MARKCANCEL", + "SYS___PTHREAD_SIGMASK", + "SYS___QUOTACTL", + "SYS___SEMCTL", + "SYS___SEMWAIT_SIGNAL", + "SYS___SEMWAIT_SIGNAL_NOCANCEL", + "SYS___SETLOGIN", + "SYS___SETUGID", + "SYS___SET_TCB", + "SYS___SIGACTION_SIGTRAMP", + "SYS___SIGTIMEDWAIT", + "SYS___SIGWAIT", + "SYS___SIGWAIT_NOCANCEL", + "SYS___SYSCTL", + "SYS___TFORK", + "SYS___THREXIT", + "SYS___THRSIGDIVERT", + "SYS___THRSLEEP", + "SYS___THRWAKEUP", + "S_ARCH1", + "S_ARCH2", + "S_BLKSIZE", + 
"S_IEXEC", + "S_IFBLK", + "S_IFCHR", + "S_IFDIR", + "S_IFIFO", + "S_IFLNK", + "S_IFMT", + "S_IFREG", + "S_IFSOCK", + "S_IFWHT", + "S_IREAD", + "S_IRGRP", + "S_IROTH", + "S_IRUSR", + "S_IRWXG", + "S_IRWXO", + "S_IRWXU", + "S_ISGID", + "S_ISTXT", + "S_ISUID", + "S_ISVTX", + "S_IWGRP", + "S_IWOTH", + "S_IWRITE", + "S_IWUSR", + "S_IXGRP", + "S_IXOTH", + "S_IXUSR", + "S_LOGIN_SET", + "SecurityAttributes", + "Seek", + "Select", + "Sendfile", + "Sendmsg", + "SendmsgN", + "Sendto", + "Servent", + "SetBpf", + "SetBpfBuflen", + "SetBpfDatalink", + "SetBpfHeadercmpl", + "SetBpfImmediate", + "SetBpfInterface", + "SetBpfPromisc", + "SetBpfTimeout", + "SetCurrentDirectory", + "SetEndOfFile", + "SetEnvironmentVariable", + "SetFileAttributes", + "SetFileCompletionNotificationModes", + "SetFilePointer", + "SetFileTime", + "SetHandleInformation", + "SetKevent", + "SetLsfPromisc", + "SetNonblock", + "Setdomainname", + "Setegid", + "Setenv", + "Seteuid", + "Setfsgid", + "Setfsuid", + "Setgid", + "Setgroups", + "Sethostname", + "Setlogin", + "Setpgid", + "Setpriority", + "Setprivexec", + "Setregid", + "Setresgid", + "Setresuid", + "Setreuid", + "Setrlimit", + "Setsid", + "Setsockopt", + "SetsockoptByte", + "SetsockoptICMPv6Filter", + "SetsockoptIPMreq", + "SetsockoptIPMreqn", + "SetsockoptIPv6Mreq", + "SetsockoptInet4Addr", + "SetsockoptInt", + "SetsockoptLinger", + "SetsockoptString", + "SetsockoptTimeval", + "Settimeofday", + "Setuid", + "Setxattr", + "Shutdown", + "SidTypeAlias", + "SidTypeComputer", + "SidTypeDeletedAccount", + "SidTypeDomain", + "SidTypeGroup", + "SidTypeInvalid", + "SidTypeLabel", + "SidTypeUnknown", + "SidTypeUser", + "SidTypeWellKnownGroup", + "Signal", + "SizeofBpfHdr", + "SizeofBpfInsn", + "SizeofBpfProgram", + "SizeofBpfStat", + "SizeofBpfVersion", + "SizeofBpfZbuf", + "SizeofBpfZbufHeader", + "SizeofCmsghdr", + "SizeofICMPv6Filter", + "SizeofIPMreq", + "SizeofIPMreqn", + "SizeofIPv6MTUInfo", + "SizeofIPv6Mreq", + "SizeofIfAddrmsg", + "SizeofIfAnnounceMsghdr", + "SizeofIfData", + "SizeofIfInfomsg", + "SizeofIfMsghdr", + "SizeofIfaMsghdr", + "SizeofIfmaMsghdr", + "SizeofIfmaMsghdr2", + "SizeofInet4Pktinfo", + "SizeofInet6Pktinfo", + "SizeofInotifyEvent", + "SizeofLinger", + "SizeofMsghdr", + "SizeofNlAttr", + "SizeofNlMsgerr", + "SizeofNlMsghdr", + "SizeofRtAttr", + "SizeofRtGenmsg", + "SizeofRtMetrics", + "SizeofRtMsg", + "SizeofRtMsghdr", + "SizeofRtNexthop", + "SizeofSockFilter", + "SizeofSockFprog", + "SizeofSockaddrAny", + "SizeofSockaddrDatalink", + "SizeofSockaddrInet4", + "SizeofSockaddrInet6", + "SizeofSockaddrLinklayer", + "SizeofSockaddrNetlink", + "SizeofSockaddrUnix", + "SizeofTCPInfo", + "SizeofUcred", + "SlicePtrFromStrings", + "SockFilter", + "SockFprog", + "Sockaddr", + "SockaddrDatalink", + "SockaddrGen", + "SockaddrInet4", + "SockaddrInet6", + "SockaddrLinklayer", + "SockaddrNetlink", + "SockaddrUnix", + "Socket", + "SocketControlMessage", + "SocketDisableIPv6", + "Socketpair", + "Splice", + "StartProcess", + "StartupInfo", + "Stat", + "Stat_t", + "Statfs", + "Statfs_t", + "Stderr", + "Stdin", + "Stdout", + "StringBytePtr", + "StringByteSlice", + "StringSlicePtr", + "StringToSid", + "StringToUTF16", + "StringToUTF16Ptr", + "Symlink", + "Sync", + "SyncFileRange", + "SysProcAttr", + "SysProcIDMap", + "Syscall", + "Syscall12", + "Syscall15", + "Syscall18", + "Syscall6", + "Syscall9", + "Sysctl", + "SysctlUint32", + "Sysctlnode", + "Sysinfo", + "Sysinfo_t", + "Systemtime", + "TCGETS", + "TCIFLUSH", + "TCIOFLUSH", + "TCOFLUSH", + "TCPInfo", + "TCPKeepalive", + 
"TCP_CA_NAME_MAX", + "TCP_CONGCTL", + "TCP_CONGESTION", + "TCP_CONNECTIONTIMEOUT", + "TCP_CORK", + "TCP_DEFER_ACCEPT", + "TCP_INFO", + "TCP_KEEPALIVE", + "TCP_KEEPCNT", + "TCP_KEEPIDLE", + "TCP_KEEPINIT", + "TCP_KEEPINTVL", + "TCP_LINGER2", + "TCP_MAXBURST", + "TCP_MAXHLEN", + "TCP_MAXOLEN", + "TCP_MAXSEG", + "TCP_MAXWIN", + "TCP_MAX_SACK", + "TCP_MAX_WINSHIFT", + "TCP_MD5SIG", + "TCP_MD5SIG_MAXKEYLEN", + "TCP_MINMSS", + "TCP_MINMSSOVERLOAD", + "TCP_MSS", + "TCP_NODELAY", + "TCP_NOOPT", + "TCP_NOPUSH", + "TCP_NSTATES", + "TCP_QUICKACK", + "TCP_RXT_CONNDROPTIME", + "TCP_RXT_FINDROP", + "TCP_SACK_ENABLE", + "TCP_SYNCNT", + "TCP_VENDOR", + "TCP_WINDOW_CLAMP", + "TCSAFLUSH", + "TCSETS", + "TF_DISCONNECT", + "TF_REUSE_SOCKET", + "TF_USE_DEFAULT_WORKER", + "TF_USE_KERNEL_APC", + "TF_USE_SYSTEM_THREAD", + "TF_WRITE_BEHIND", + "TH32CS_INHERIT", + "TH32CS_SNAPALL", + "TH32CS_SNAPHEAPLIST", + "TH32CS_SNAPMODULE", + "TH32CS_SNAPMODULE32", + "TH32CS_SNAPPROCESS", + "TH32CS_SNAPTHREAD", + "TIME_ZONE_ID_DAYLIGHT", + "TIME_ZONE_ID_STANDARD", + "TIME_ZONE_ID_UNKNOWN", + "TIOCCBRK", + "TIOCCDTR", + "TIOCCONS", + "TIOCDCDTIMESTAMP", + "TIOCDRAIN", + "TIOCDSIMICROCODE", + "TIOCEXCL", + "TIOCEXT", + "TIOCFLAG_CDTRCTS", + "TIOCFLAG_CLOCAL", + "TIOCFLAG_CRTSCTS", + "TIOCFLAG_MDMBUF", + "TIOCFLAG_PPS", + "TIOCFLAG_SOFTCAR", + "TIOCFLUSH", + "TIOCGDEV", + "TIOCGDRAINWAIT", + "TIOCGETA", + "TIOCGETD", + "TIOCGFLAGS", + "TIOCGICOUNT", + "TIOCGLCKTRMIOS", + "TIOCGLINED", + "TIOCGPGRP", + "TIOCGPTN", + "TIOCGQSIZE", + "TIOCGRANTPT", + "TIOCGRS485", + "TIOCGSERIAL", + "TIOCGSID", + "TIOCGSIZE", + "TIOCGSOFTCAR", + "TIOCGTSTAMP", + "TIOCGWINSZ", + "TIOCINQ", + "TIOCIXOFF", + "TIOCIXON", + "TIOCLINUX", + "TIOCMBIC", + "TIOCMBIS", + "TIOCMGDTRWAIT", + "TIOCMGET", + "TIOCMIWAIT", + "TIOCMODG", + "TIOCMODS", + "TIOCMSDTRWAIT", + "TIOCMSET", + "TIOCM_CAR", + "TIOCM_CD", + "TIOCM_CTS", + "TIOCM_DCD", + "TIOCM_DSR", + "TIOCM_DTR", + "TIOCM_LE", + "TIOCM_RI", + "TIOCM_RNG", + "TIOCM_RTS", + "TIOCM_SR", + "TIOCM_ST", + "TIOCNOTTY", + "TIOCNXCL", + "TIOCOUTQ", + "TIOCPKT", + "TIOCPKT_DATA", + "TIOCPKT_DOSTOP", + "TIOCPKT_FLUSHREAD", + "TIOCPKT_FLUSHWRITE", + "TIOCPKT_IOCTL", + "TIOCPKT_NOSTOP", + "TIOCPKT_START", + "TIOCPKT_STOP", + "TIOCPTMASTER", + "TIOCPTMGET", + "TIOCPTSNAME", + "TIOCPTYGNAME", + "TIOCPTYGRANT", + "TIOCPTYUNLK", + "TIOCRCVFRAME", + "TIOCREMOTE", + "TIOCSBRK", + "TIOCSCONS", + "TIOCSCTTY", + "TIOCSDRAINWAIT", + "TIOCSDTR", + "TIOCSERCONFIG", + "TIOCSERGETLSR", + "TIOCSERGETMULTI", + "TIOCSERGSTRUCT", + "TIOCSERGWILD", + "TIOCSERSETMULTI", + "TIOCSERSWILD", + "TIOCSER_TEMT", + "TIOCSETA", + "TIOCSETAF", + "TIOCSETAW", + "TIOCSETD", + "TIOCSFLAGS", + "TIOCSIG", + "TIOCSLCKTRMIOS", + "TIOCSLINED", + "TIOCSPGRP", + "TIOCSPTLCK", + "TIOCSQSIZE", + "TIOCSRS485", + "TIOCSSERIAL", + "TIOCSSIZE", + "TIOCSSOFTCAR", + "TIOCSTART", + "TIOCSTAT", + "TIOCSTI", + "TIOCSTOP", + "TIOCSTSTAMP", + "TIOCSWINSZ", + "TIOCTIMESTAMP", + "TIOCUCNTL", + "TIOCVHANGUP", + "TIOCXMTFRAME", + "TOKEN_ADJUST_DEFAULT", + "TOKEN_ADJUST_GROUPS", + "TOKEN_ADJUST_PRIVILEGES", + "TOKEN_ADJUST_SESSIONID", + "TOKEN_ALL_ACCESS", + "TOKEN_ASSIGN_PRIMARY", + "TOKEN_DUPLICATE", + "TOKEN_EXECUTE", + "TOKEN_IMPERSONATE", + "TOKEN_QUERY", + "TOKEN_QUERY_SOURCE", + "TOKEN_READ", + "TOKEN_WRITE", + "TOSTOP", + "TRUNCATE_EXISTING", + "TUNATTACHFILTER", + "TUNDETACHFILTER", + "TUNGETFEATURES", + "TUNGETIFF", + "TUNGETSNDBUF", + "TUNGETVNETHDRSZ", + "TUNSETDEBUG", + "TUNSETGROUP", + "TUNSETIFF", + "TUNSETLINK", + "TUNSETNOCSUM", + "TUNSETOFFLOAD", + 
"TUNSETOWNER", + "TUNSETPERSIST", + "TUNSETSNDBUF", + "TUNSETTXFILTER", + "TUNSETVNETHDRSZ", + "Tee", + "TerminateProcess", + "Termios", + "Tgkill", + "Time", + "Time_t", + "Times", + "Timespec", + "TimespecToNsec", + "Timeval", + "Timeval32", + "TimevalToNsec", + "Timex", + "Timezoneinformation", + "Tms", + "Token", + "TokenAccessInformation", + "TokenAuditPolicy", + "TokenDefaultDacl", + "TokenElevation", + "TokenElevationType", + "TokenGroups", + "TokenGroupsAndPrivileges", + "TokenHasRestrictions", + "TokenImpersonationLevel", + "TokenIntegrityLevel", + "TokenLinkedToken", + "TokenLogonSid", + "TokenMandatoryPolicy", + "TokenOrigin", + "TokenOwner", + "TokenPrimaryGroup", + "TokenPrivileges", + "TokenRestrictedSids", + "TokenSandBoxInert", + "TokenSessionId", + "TokenSessionReference", + "TokenSource", + "TokenStatistics", + "TokenType", + "TokenUIAccess", + "TokenUser", + "TokenVirtualizationAllowed", + "TokenVirtualizationEnabled", + "Tokenprimarygroup", + "Tokenuser", + "TranslateAccountName", + "TranslateName", + "TransmitFile", + "TransmitFileBuffers", + "Truncate", + "UNIX_PATH_MAX", + "USAGE_MATCH_TYPE_AND", + "USAGE_MATCH_TYPE_OR", + "UTF16FromString", + "UTF16PtrFromString", + "UTF16ToString", + "Ucred", + "Umask", + "Uname", + "Undelete", + "UnixCredentials", + "UnixRights", + "Unlink", + "Unlinkat", + "UnmapViewOfFile", + "Unmount", + "Unsetenv", + "Unshare", + "UserInfo10", + "Ustat", + "Ustat_t", + "Utimbuf", + "Utime", + "Utimes", + "UtimesNano", + "Utsname", + "VDISCARD", + "VDSUSP", + "VEOF", + "VEOL", + "VEOL2", + "VERASE", + "VERASE2", + "VINTR", + "VKILL", + "VLNEXT", + "VMIN", + "VQUIT", + "VREPRINT", + "VSTART", + "VSTATUS", + "VSTOP", + "VSUSP", + "VSWTC", + "VT0", + "VT1", + "VTDLY", + "VTIME", + "VWERASE", + "VirtualLock", + "VirtualUnlock", + "WAIT_ABANDONED", + "WAIT_FAILED", + "WAIT_OBJECT_0", + "WAIT_TIMEOUT", + "WALL", + "WALLSIG", + "WALTSIG", + "WCLONE", + "WCONTINUED", + "WCOREFLAG", + "WEXITED", + "WLINUXCLONE", + "WNOHANG", + "WNOTHREAD", + "WNOWAIT", + "WNOZOMBIE", + "WOPTSCHECKED", + "WORDSIZE", + "WSABuf", + "WSACleanup", + "WSADESCRIPTION_LEN", + "WSAData", + "WSAEACCES", + "WSAECONNABORTED", + "WSAECONNRESET", + "WSAEnumProtocols", + "WSAID_CONNECTEX", + "WSAIoctl", + "WSAPROTOCOL_LEN", + "WSAProtocolChain", + "WSAProtocolInfo", + "WSARecv", + "WSARecvFrom", + "WSASYS_STATUS_LEN", + "WSASend", + "WSASendTo", + "WSASendto", + "WSAStartup", + "WSTOPPED", + "WTRAPPED", + "WUNTRACED", + "Wait4", + "WaitForSingleObject", + "WaitStatus", + "Win32FileAttributeData", + "Win32finddata", + "Write", + "WriteConsole", + "WriteFile", + "X509_ASN_ENCODING", + "XCASE", + "XP1_CONNECTIONLESS", + "XP1_CONNECT_DATA", + "XP1_DISCONNECT_DATA", + "XP1_EXPEDITED_DATA", + "XP1_GRACEFUL_CLOSE", + "XP1_GUARANTEED_DELIVERY", + "XP1_GUARANTEED_ORDER", + "XP1_IFS_HANDLES", + "XP1_MESSAGE_ORIENTED", + "XP1_MULTIPOINT_CONTROL_PLANE", + "XP1_MULTIPOINT_DATA_PLANE", + "XP1_PARTIAL_MESSAGE", + "XP1_PSEUDO_STREAM", + "XP1_QOS_SUPPORTED", + "XP1_SAN_SUPPORT_SDP", + "XP1_SUPPORT_BROADCAST", + "XP1_SUPPORT_MULTIPOINT", + "XP1_UNI_RECV", + "XP1_UNI_SEND", + }, + "syscall/js": []string{ + "CopyBytesToGo", + "CopyBytesToJS", + "Error", + "Func", + "FuncOf", + "Global", + "Null", + "Type", + "TypeBoolean", + "TypeFunction", + "TypeNull", + "TypeNumber", + "TypeObject", + "TypeString", + "TypeSymbol", + "TypeUndefined", + "Undefined", + "Value", + "ValueError", + "ValueOf", + "Wrapper", + }, + "testing": []string{ + "AllocsPerRun", + "B", + "Benchmark", + "BenchmarkResult", + "Cover", + 
"CoverBlock", + "CoverMode", + "Coverage", + "Init", + "InternalBenchmark", + "InternalExample", + "InternalTest", + "M", + "Main", + "MainStart", + "PB", + "RegisterCover", + "RunBenchmarks", + "RunExamples", + "RunTests", + "Short", + "T", + "TB", + "Verbose", + }, + "testing/iotest": []string{ + "DataErrReader", + "ErrTimeout", + "HalfReader", + "NewReadLogger", + "NewWriteLogger", + "OneByteReader", + "TimeoutReader", + "TruncateWriter", + }, + "testing/quick": []string{ + "Check", + "CheckEqual", + "CheckEqualError", + "CheckError", + "Config", + "Generator", + "SetupError", + "Value", + }, + "text/scanner": []string{ + "Char", + "Comment", + "EOF", + "Float", + "GoTokens", + "GoWhitespace", + "Ident", + "Int", + "Position", + "RawString", + "ScanChars", + "ScanComments", + "ScanFloats", + "ScanIdents", + "ScanInts", + "ScanRawStrings", + "ScanStrings", + "Scanner", + "SkipComments", + "String", + "TokenString", + }, + "text/tabwriter": []string{ + "AlignRight", + "Debug", + "DiscardEmptyColumns", + "Escape", + "FilterHTML", + "NewWriter", + "StripEscape", + "TabIndent", + "Writer", + }, + "text/template": []string{ + "ExecError", + "FuncMap", + "HTMLEscape", + "HTMLEscapeString", + "HTMLEscaper", + "IsTrue", + "JSEscape", + "JSEscapeString", + "JSEscaper", + "Must", + "New", + "ParseFiles", + "ParseGlob", + "Template", + "URLQueryEscaper", + }, + "text/template/parse": []string{ + "ActionNode", + "BoolNode", + "BranchNode", + "ChainNode", + "CommandNode", + "DotNode", + "FieldNode", + "IdentifierNode", + "IfNode", + "IsEmptyTree", + "ListNode", + "New", + "NewIdentifier", + "NilNode", + "Node", + "NodeAction", + "NodeBool", + "NodeChain", + "NodeCommand", + "NodeDot", + "NodeField", + "NodeIdentifier", + "NodeIf", + "NodeList", + "NodeNil", + "NodeNumber", + "NodePipe", + "NodeRange", + "NodeString", + "NodeTemplate", + "NodeText", + "NodeType", + "NodeVariable", + "NodeWith", + "NumberNode", + "Parse", + "PipeNode", + "Pos", + "RangeNode", + "StringNode", + "TemplateNode", + "TextNode", + "Tree", + "VariableNode", + "WithNode", + }, + "time": []string{ + "ANSIC", + "After", + "AfterFunc", + "April", + "August", + "Date", + "December", + "Duration", + "February", + "FixedZone", + "Friday", + "Hour", + "January", + "July", + "June", + "Kitchen", + "LoadLocation", + "LoadLocationFromTZData", + "Local", + "Location", + "March", + "May", + "Microsecond", + "Millisecond", + "Minute", + "Monday", + "Month", + "Nanosecond", + "NewTicker", + "NewTimer", + "November", + "Now", + "October", + "Parse", + "ParseDuration", + "ParseError", + "ParseInLocation", + "RFC1123", + "RFC1123Z", + "RFC3339", + "RFC3339Nano", + "RFC822", + "RFC822Z", + "RFC850", + "RubyDate", + "Saturday", + "Second", + "September", + "Since", + "Sleep", + "Stamp", + "StampMicro", + "StampMilli", + "StampNano", + "Sunday", + "Thursday", + "Tick", + "Ticker", + "Time", + "Timer", + "Tuesday", + "UTC", + "Unix", + "UnixDate", + "Until", + "Wednesday", + "Weekday", + }, + "unicode": []string{ + "ASCII_Hex_Digit", + "Adlam", + "Ahom", + "Anatolian_Hieroglyphs", + "Arabic", + "Armenian", + "Avestan", + "AzeriCase", + "Balinese", + "Bamum", + "Bassa_Vah", + "Batak", + "Bengali", + "Bhaiksuki", + "Bidi_Control", + "Bopomofo", + "Brahmi", + "Braille", + "Buginese", + "Buhid", + "C", + "Canadian_Aboriginal", + "Carian", + "CaseRange", + "CaseRanges", + "Categories", + "Caucasian_Albanian", + "Cc", + "Cf", + "Chakma", + "Cham", + "Cherokee", + "Co", + "Common", + "Coptic", + "Cs", + "Cuneiform", + "Cypriot", + "Cyrillic", + "Dash", + 
"Deprecated", + "Deseret", + "Devanagari", + "Diacritic", + "Digit", + "Dogra", + "Duployan", + "Egyptian_Hieroglyphs", + "Elbasan", + "Elymaic", + "Ethiopic", + "Extender", + "FoldCategory", + "FoldScript", + "Georgian", + "Glagolitic", + "Gothic", + "Grantha", + "GraphicRanges", + "Greek", + "Gujarati", + "Gunjala_Gondi", + "Gurmukhi", + "Han", + "Hangul", + "Hanifi_Rohingya", + "Hanunoo", + "Hatran", + "Hebrew", + "Hex_Digit", + "Hiragana", + "Hyphen", + "IDS_Binary_Operator", + "IDS_Trinary_Operator", + "Ideographic", + "Imperial_Aramaic", + "In", + "Inherited", + "Inscriptional_Pahlavi", + "Inscriptional_Parthian", + "Is", + "IsControl", + "IsDigit", + "IsGraphic", + "IsLetter", + "IsLower", + "IsMark", + "IsNumber", + "IsOneOf", + "IsPrint", + "IsPunct", + "IsSpace", + "IsSymbol", + "IsTitle", + "IsUpper", + "Javanese", + "Join_Control", + "Kaithi", + "Kannada", + "Katakana", + "Kayah_Li", + "Kharoshthi", + "Khmer", + "Khojki", + "Khudawadi", + "L", + "Lao", + "Latin", + "Lepcha", + "Letter", + "Limbu", + "Linear_A", + "Linear_B", + "Lisu", + "Ll", + "Lm", + "Lo", + "Logical_Order_Exception", + "Lower", + "LowerCase", + "Lt", + "Lu", + "Lycian", + "Lydian", + "M", + "Mahajani", + "Makasar", + "Malayalam", + "Mandaic", + "Manichaean", + "Marchen", + "Mark", + "Masaram_Gondi", + "MaxASCII", + "MaxCase", + "MaxLatin1", + "MaxRune", + "Mc", + "Me", + "Medefaidrin", + "Meetei_Mayek", + "Mende_Kikakui", + "Meroitic_Cursive", + "Meroitic_Hieroglyphs", + "Miao", + "Mn", + "Modi", + "Mongolian", + "Mro", + "Multani", + "Myanmar", + "N", + "Nabataean", + "Nandinagari", + "Nd", + "New_Tai_Lue", + "Newa", + "Nko", + "Nl", + "No", + "Noncharacter_Code_Point", + "Number", + "Nushu", + "Nyiakeng_Puachue_Hmong", + "Ogham", + "Ol_Chiki", + "Old_Hungarian", + "Old_Italic", + "Old_North_Arabian", + "Old_Permic", + "Old_Persian", + "Old_Sogdian", + "Old_South_Arabian", + "Old_Turkic", + "Oriya", + "Osage", + "Osmanya", + "Other", + "Other_Alphabetic", + "Other_Default_Ignorable_Code_Point", + "Other_Grapheme_Extend", + "Other_ID_Continue", + "Other_ID_Start", + "Other_Lowercase", + "Other_Math", + "Other_Uppercase", + "P", + "Pahawh_Hmong", + "Palmyrene", + "Pattern_Syntax", + "Pattern_White_Space", + "Pau_Cin_Hau", + "Pc", + "Pd", + "Pe", + "Pf", + "Phags_Pa", + "Phoenician", + "Pi", + "Po", + "Prepended_Concatenation_Mark", + "PrintRanges", + "Properties", + "Ps", + "Psalter_Pahlavi", + "Punct", + "Quotation_Mark", + "Radical", + "Range16", + "Range32", + "RangeTable", + "Regional_Indicator", + "Rejang", + "ReplacementChar", + "Runic", + "S", + "STerm", + "Samaritan", + "Saurashtra", + "Sc", + "Scripts", + "Sentence_Terminal", + "Sharada", + "Shavian", + "Siddham", + "SignWriting", + "SimpleFold", + "Sinhala", + "Sk", + "Sm", + "So", + "Soft_Dotted", + "Sogdian", + "Sora_Sompeng", + "Soyombo", + "Space", + "SpecialCase", + "Sundanese", + "Syloti_Nagri", + "Symbol", + "Syriac", + "Tagalog", + "Tagbanwa", + "Tai_Le", + "Tai_Tham", + "Tai_Viet", + "Takri", + "Tamil", + "Tangut", + "Telugu", + "Terminal_Punctuation", + "Thaana", + "Thai", + "Tibetan", + "Tifinagh", + "Tirhuta", + "Title", + "TitleCase", + "To", + "ToLower", + "ToTitle", + "ToUpper", + "TurkishCase", + "Ugaritic", + "Unified_Ideograph", + "Upper", + "UpperCase", + "UpperLower", + "Vai", + "Variation_Selector", + "Version", + "Wancho", + "Warang_Citi", + "White_Space", + "Yi", + "Z", + "Zanabazar_Square", + "Zl", + "Zp", + "Zs", + }, + "unicode/utf16": []string{ + "Decode", + "DecodeRune", + "Encode", + "EncodeRune", + "IsSurrogate", + }, + 
"unicode/utf8": []string{ + "DecodeLastRune", + "DecodeLastRuneInString", + "DecodeRune", + "DecodeRuneInString", + "EncodeRune", + "FullRune", + "FullRuneInString", + "MaxRune", + "RuneCount", + "RuneCountInString", + "RuneError", + "RuneLen", + "RuneSelf", + "RuneStart", + "UTFMax", + "Valid", + "ValidRune", + "ValidString", + }, + "unsafe": []string{ + "Alignof", + "ArbitraryType", + "Offsetof", + "Pointer", + "Sizeof", + }, +} diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE new file mode 100644 index 00000000..e4a47e17 --- /dev/null +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/xerrors/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README new file mode 100644 index 00000000..aac7867a --- /dev/null +++ b/vendor/golang.org/x/xerrors/README @@ -0,0 +1,2 @@ +This repository holds the transition packages for the new Go 1.13 error values. +See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go new file mode 100644 index 00000000..4317f248 --- /dev/null +++ b/vendor/golang.org/x/xerrors/adaptor.go @@ -0,0 +1,193 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" +) + +// FormatError calls the FormatError method of f with an errors.Printer +// configured according to s and verb, and writes the result to s. +func FormatError(f Formatter, s fmt.State, verb rune) { + // Assuming this function is only called from the Format method, and given + // that FormatError takes precedence over Format, it cannot be called from + // any package that supports errors.Formatter. It is therefore safe to + // disregard that State may be a specific printer implementation and use one + // of our choice instead. + + // limitations: does not support printing error as Go struct. + + var ( + sep = " " // separator before next error + p = &state{State: s} + direct = true + ) + + var err error = f + + switch verb { + // Note that this switch must match the preference order + // for ordinary string printing (%#v before %+v, and so on). + + case 'v': + if s.Flag('#') { + if stringer, ok := err.(fmt.GoStringer); ok { + io.WriteString(&p.buf, stringer.GoString()) + goto exit + } + // proceed as if it were %v + } else if s.Flag('+') { + p.printDetail = true + sep = "\n - " + } + case 's': + case 'q', 'x', 'X': + // Use an intermediate buffer in the rare cases that precision, + // truncation, or one of the alternative verbs (q, x, and X) are + // specified. + direct = false + + default: + p.buf.WriteString("%!") + p.buf.WriteRune(verb) + p.buf.WriteByte('(') + switch { + case err != nil: + p.buf.WriteString(reflect.TypeOf(f).String()) + default: + p.buf.WriteString("<nil>") + } + p.buf.WriteByte(')') + io.Copy(s, &p.buf) + return + } + +loop: + for { + switch v := err.(type) { + case Formatter: + err = v.FormatError((*printer)(p)) + case fmt.Formatter: + v.Format(p, 'v') + break loop + default: + io.WriteString(&p.buf, v.Error()) + break loop + } + if err == nil { + break + } + if p.needColon || !p.printDetail { + p.buf.WriteByte(':') + p.needColon = false + } + p.buf.WriteString(sep) + p.inDetail = false + p.needNewline = false + } + +exit: + width, okW := s.Width() + prec, okP := s.Precision() + + if !direct || (okW && width > 0) || okP { + // Construct format string from State s.
+ format := []byte{'%'} + if s.Flag('-') { + format = append(format, '-') + } + if s.Flag('+') { + format = append(format, '+') + } + if s.Flag(' ') { + format = append(format, ' ') + } + if okW { + format = strconv.AppendInt(format, int64(width), 10) + } + if okP { + format = append(format, '.') + format = strconv.AppendInt(format, int64(prec), 10) + } + format = append(format, string(verb)...) + fmt.Fprintf(s, string(format), p.buf.String()) + } else { + io.Copy(s, &p.buf) + } +} + +var detailSep = []byte("\n ") + +// state tracks error printing state. It implements fmt.State. +type state struct { + fmt.State + buf bytes.Buffer + + printDetail bool + inDetail bool + needColon bool + needNewline bool +} + +func (s *state) Write(b []byte) (n int, err error) { + if s.printDetail { + if len(b) == 0 { + return 0, nil + } + if s.inDetail && s.needColon { + s.needNewline = true + if b[0] == '\n' { + b = b[1:] + } + } + k := 0 + for i, c := range b { + if s.needNewline { + if s.inDetail && s.needColon { + s.buf.WriteByte(':') + s.needColon = false + } + s.buf.Write(detailSep) + s.needNewline = false + } + if c == '\n' { + s.buf.Write(b[k:i]) + k = i + 1 + s.needNewline = true + } + } + s.buf.Write(b[k:]) + if !s.inDetail { + s.needColon = true + } + } else if !s.inDetail { + s.buf.Write(b) + } + return len(b), nil +} + +// printer wraps a state to implement an xerrors.Printer. +type printer state + +func (s *printer) Print(args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprint((*state)(s), args...) + } +} + +func (s *printer) Printf(format string, args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprintf((*state)(s), format, args...) + } +} + +func (s *printer) Detail() bool { + s.inDetail = true + return s.printDetail +} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg new file mode 100644 index 00000000..3f8b14b6 --- /dev/null +++ b/vendor/golang.org/x/xerrors/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go new file mode 100644 index 00000000..eef99d9d --- /dev/null +++ b/vendor/golang.org/x/xerrors/doc.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xerrors implements functions to manipulate errors. +// +// This package is based on the Go 2 proposal for error values: +// https://golang.org/design/29934-error-values +// +// These functions were incorporated into the standard library's errors package +// in Go 1.13: +// - Is +// - As +// - Unwrap +// +// Also, Errorf's %w verb was incorporated into fmt.Errorf. +// +// Use this package to get equivalent behavior in all supported Go versions. +// +// No other features of this package were included in Go 1.13, and at present +// there are no plans to include any of them. +package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go new file mode 100644 index 00000000..e88d3772 --- /dev/null +++ b/vendor/golang.org/x/xerrors/errors.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import "fmt" + +// errorString is a trivial implementation of error. 
+type errorString struct { + s string + frame Frame +} + +// New returns an error that formats as the given text. +// +// The returned error contains a Frame set to the caller's location and +// implements Formatter to show this information when printed with details. +func New(text string) error { + return &errorString{text, Caller(1)} +} + +func (e *errorString) Error() string { + return e.s +} + +func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *errorString) FormatError(p Printer) (next error) { + p.Print(e.s) + e.frame.Format(p) + return nil +} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go new file mode 100644 index 00000000..829862dd --- /dev/null +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/xerrors/internal" +) + +const percentBangString = "%!" + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. +// +// The returned error includes the file and line number of the caller when +// formatted with additional detail enabled. If the last argument is an error +// the returned error's Format method will return it if the format string ends +// with ": %s", ": %v", or ": %w". If the last argument is an error and the +// format string ends with ": %w", the returned error implements an Unwrap +// method returning it. +// +// If the format specifier includes a %w verb with an error operand in a +// position other than at the end, the returned error will still implement an +// Unwrap method returning the operand, but the error's Format method will not +// return the wrapped error. +// +// It is invalid to include more than one %w verb or to supply it with an +// operand that does not implement the error interface. The %w verb is otherwise +// a synonym for %v. +func Errorf(format string, a ...interface{}) error { + format = formatPlusW(format) + // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. + wrap := strings.HasSuffix(format, ": %w") + idx, format2, ok := parsePercentW(format) + percentWElsewhere := !wrap && idx >= 0 + if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { + err := errorAt(a, len(a)-1) + if err == nil { + return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + } + // TODO: this is not entirely correct. The error value could be + // printed elsewhere in format if it mixes numbered with unnumbered + // substitutions. With relatively small changes to doPrintf we can + // have it optionally ignore extra arguments and pass the argument + // list in its entirety. + msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + if wrap { + return &wrapError{msg, err, frame} + } + return &noWrapError{msg, err, frame} + } + // Support %w anywhere. + // TODO: don't repeat the wrapped error's message when %w occurs in the middle. + msg := fmt.Sprintf(format2, a...) + if idx < 0 { + return &noWrapError{msg, nil, Caller(1)} + } + err := errorAt(a, idx) + if !ok || err == nil { + // Too many %ws or argument of %w is not an error. Approximate the Go + // 1.13 fmt.Errorf message. 
+ return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} + } + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + return &wrapError{msg, err, frame} +} + +func errorAt(args []interface{}, i int) error { + if i < 0 || i >= len(args) { + return nil + } + err, ok := args[i].(error) + if !ok { + return nil + } + return err +} + +// formatPlusW is used to avoid the vet check that will barf at %w. +func formatPlusW(s string) string { + return s +} + +// Return the index of the only %w in format, or -1 if none. +// Also return a rewritten format string with %w replaced by %v, and +// false if there is more than one %w. +// TODO: handle "%[N]w". +func parsePercentW(format string) (idx int, newFormat string, ok bool) { + // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. + idx = -1 + ok = true + n := 0 + sz := 0 + var isW bool + for i := 0; i < len(format); i += sz { + if format[i] != '%' { + sz = 1 + continue + } + // "%%" is not a format directive. + if i+1 < len(format) && format[i+1] == '%' { + sz = 2 + continue + } + sz, isW = parsePrintfVerb(format[i:]) + if isW { + if idx >= 0 { + ok = false + } else { + idx = n + } + // "Replace" the last character, the 'w', with a 'v'. + p := i + sz - 1 + format = format[:p] + "v" + format[p+1:] + } + n++ + } + return idx, format, ok +} + +// Parse the printf verb starting with a % at s[0]. +// Return how many bytes it occupies and whether the verb is 'w'. +func parsePrintfVerb(s string) (int, bool) { + // Assume only that the directive is a sequence of non-letters followed by a single letter. + sz := 0 + var r rune + for i := 1; i < len(s); i += sz { + r, sz = utf8.DecodeRuneInString(s[i:]) + if unicode.IsLetter(r) { + return i + sz, r == 'w' + } + } + return len(s), false +} + +type noWrapError struct { + msg string + err error + frame Frame +} + +func (e *noWrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *noWrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +type wrapError struct { + msg string + err error + frame Frame +} + +func (e *wrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *wrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +func (e *wrapError) Unwrap() error { + return e.err +} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go new file mode 100644 index 00000000..1bc9c26b --- /dev/null +++ b/vendor/golang.org/x/xerrors/format.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +// A Formatter formats error messages. +type Formatter interface { + error + + // FormatError prints the receiver's first error and returns the next error in + // the error chain, if any. + FormatError(p Printer) (next error) +} + +// A Printer formats error messages. +// +// The most common implementation of Printer is the one provided by package fmt +// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message +// typically provide their own implementations. +type Printer interface { + // Print appends args to the message output. 
+ Print(args ...interface{}) + + // Printf writes a formatted string. + Printf(format string, args ...interface{}) + + // Detail reports whether error detail is requested. + // After the first call to Detail, all text written to the Printer + // is formatted as additional detail, or ignored when + // detail has not been requested. + // If Detail returns false, the caller can avoid printing the detail at all. + Detail() bool +} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go new file mode 100644 index 00000000..0de628ec --- /dev/null +++ b/vendor/golang.org/x/xerrors/frame.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "runtime" +) + +// A Frame contains part of a call stack. +type Frame struct { + // Make room for three PCs: the one we were asked for, what it called, + // and possibly a PC for skipPleaseUseCallersFrames. See: + // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 + frames [3]uintptr +} + +// Caller returns a Frame that describes a frame on the caller's stack. +// The argument skip is the number of frames to skip over. +// Caller(0) returns the frame for the caller of Caller. +func Caller(skip int) Frame { + var s Frame + runtime.Callers(skip+1, s.frames[:]) + return s +} + +// location reports the file, line, and function of a frame. +// +// The returned function may be "" even if file and line are not. +func (f Frame) location() (function, file string, line int) { + frames := runtime.CallersFrames(f.frames[:]) + if _, ok := frames.Next(); !ok { + return "", "", 0 + } + fr, ok := frames.Next() + if !ok { + return "", "", 0 + } + return fr.Function, fr.File, fr.Line +} + +// Format prints the stack as error detail. +// It should be called from an error's Format implementation +// after printing any other error detail. +func (f Frame) Format(p Printer) { + if p.Detail() { + function, file, line := f.location() + if function != "" { + p.Printf("%s\n ", function) + } + if file != "" { + p.Printf("%s:%d\n", file, line) + } + } +} diff --git a/vendor/golang.org/x/xerrors/go.mod b/vendor/golang.org/x/xerrors/go.mod new file mode 100644 index 00000000..870d4f61 --- /dev/null +++ b/vendor/golang.org/x/xerrors/go.mod @@ -0,0 +1,3 @@ +module golang.org/x/xerrors + +go 1.11 diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go new file mode 100644 index 00000000..89f4eca5 --- /dev/null +++ b/vendor/golang.org/x/xerrors/internal/internal.go @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// EnableTrace indicates whether stack information should be recorded in errors. +var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go new file mode 100644 index 00000000..9a3b5103 --- /dev/null +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "reflect" +) + +// A Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. 
+ // If there is no next error, Unwrap returns nil. + Unwrap() error +} + +// Opaque returns an error with the same error formatting as err +// but that does not match err and cannot be unwrapped. +func Opaque(err error) error { + return noWrapper{err} +} + +type noWrapper struct { + error +} + +func (e noWrapper) FormatError(p Printer) (next error) { + if f, ok := e.error.(Formatter); ok { + return f.FormatError(p) + } + p.Print(e.error) + return nil +} + +// Unwrap returns the result of calling the Unwrap method on err, if err implements +// Unwrap. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + u, ok := err.(Wrapper) + if !ok { + return nil + } + return u.Unwrap() +} + +// Is reports whether any error in err's chain matches target. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + // TODO: consider supporing target.Is(err). This would allow + // user-definable predicates, but also may allow for coping with sloppy + // APIs, thereby making it easier to get away with them. + if err = Unwrap(err); err == nil { + return false + } + } +} + +// As finds the first error in err's chain that matches the type to which target +// points, and if so, sets the target to its value and returns true. An error +// matches a type if it is assignable to the target type, or if it has a method +// As(interface{}) bool such that As(target) returns true. As will panic if target +// is not a non-nil pointer to a type which implements error or is of interface type. +// +// The As method should set the target to its value and return true if err +// matches the type to which target points. +func As(err error, target interface{}) bool { + if target == nil { + panic("errors: target cannot be nil") + } + val := reflect.ValueOf(target) + typ := val.Type() + if typ.Kind() != reflect.Ptr || val.IsNil() { + panic("errors: target must be a non-nil pointer") + } + if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { + panic("errors: *target must be interface or implement error") + } + targetType := typ.Elem() + for err != nil { + if reflect.TypeOf(err).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(err)) + return true + } + if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { + return true + } + err = Unwrap(err) + } + return false +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS index f73b7257..f0702905 100644 --- a/vendor/google.golang.org/api/AUTHORS +++ b/vendor/google.golang.org/api/AUTHORS @@ -8,3 +8,4 @@ # Please keep the list sorted. Google Inc. +LightStep Inc. 
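For orientation, the vendored golang.org/x/xerrors package added above backports the Go 1.13 error-value behavior (Errorf with the %w verb, Is, As, Unwrap, and call-frame capture) to older Go versions. The following is a minimal, hypothetical sketch of how client code can exercise that API; the sentinel error and lookup function are illustrative only and are not taken from this provider or its dependencies.

```go
package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

// errNotFound is a hypothetical sentinel error, used only for illustration.
var errNotFound = xerrors.New("role not found")

// lookupRole is a stand-in for any operation that can fail; here it always
// wraps errNotFound with %w so callers can unwrap and match it later.
func lookupRole(name string) error {
	return xerrors.Errorf("looking up role %q: %w", name, errNotFound)
}

func main() {
	err := lookupRole("example-role")

	// Is walks the wrap chain built by Errorf, so the sentinel still matches.
	if xerrors.Is(err, errNotFound) {
		fmt.Println("not found:", err)
	}

	// %+v prints the extra detail (call frames) recorded by New and Errorf.
	fmt.Printf("%+v\n", err)
}
```

On Go 1.13 and newer the equivalent Is/As/Unwrap functions and the %w verb live in the standard errors and fmt packages; per its own doc.go, this vendored copy exists as a transition package so the same behavior is available on all supported Go versions.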
diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS index fe55ebff..788677b8 100644 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -45,6 +45,7 @@ Jason Hall Johan Euphrosine Kostik Shtoyk Kunpei Sakai +Matthew Dolan Matthew Whisenhunt Michael McGreevy Nick Craig-Wood diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go deleted file mode 100644 index 83778508..00000000 --- a/vendor/google.golang.org/api/gensupport/jsonfloat.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gensupport - -import ( - "encoding/json" - "errors" - "fmt" - "math" -) - -// JSONFloat64 is a float64 that supports proper unmarshaling of special float -// values in JSON, according to -// https://developers.google.com/protocol-buffers/docs/proto3#json. Although -// that is a proto-to-JSON spec, it applies to all Google APIs. -// -// The jsonpb package -// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has -// similar functionality, but only for direct translation from proto messages -// to JSON. -type JSONFloat64 float64 - -func (f *JSONFloat64) UnmarshalJSON(data []byte) error { - var ff float64 - if err := json.Unmarshal(data, &ff); err == nil { - *f = JSONFloat64(ff) - return nil - } - var s string - if err := json.Unmarshal(data, &s); err == nil { - switch s { - case "NaN": - ff = math.NaN() - case "Infinity": - ff = math.Inf(1) - case "-Infinity": - ff = math.Inf(-1) - default: - return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) - } - *f = JSONFloat64(ff) - return nil - } - return errors.New("google.golang.org/api/internal: data not float or string") -} diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go deleted file mode 100644 index 57993930..00000000 --- a/vendor/google.golang.org/api/gensupport/send.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "context" - "encoding/json" - "errors" - "net/http" -) - -// Hook is the type of a function that is called once before each HTTP request -// that is sent by a generated API. It returns a function that is called after -// the request returns. -// Hooks are not called if the context is nil. -type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) - -var hooks []Hook - -// RegisterHook registers a Hook to be called before each HTTP request by a -// generated API. Hooks are called in the order they are registered. Each -// hook can return a function; if it is non-nil, it is called after the HTTP -// request returns. These functions are called in the reverse order. 
-// RegisterHook should not be called concurrently with itself or SendRequest. -func RegisterHook(h Hook) { - hooks = append(hooks, h) -} - -// SendRequest sends a single HTTP request using the given client. -// If ctx is non-nil, it calls all hooks, then sends the request with -// req.WithContext, then calls any functions returned by the hooks in -// reverse order. -func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - // Disallow Accept-Encoding because it interferes with the automatic gzip handling - // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. - if _, ok := req.Header["Accept-Encoding"]; ok { - return nil, errors.New("google api: custom Accept-Encoding headers not allowed") - } - if ctx == nil { - return client.Do(req) - } - // Call hooks in order of registration, store returned funcs. - post := make([]func(resp *http.Response), len(hooks)) - for i, h := range hooks { - fn := h(ctx, req) - post[i] = fn - } - - // Send request. - resp, err := send(ctx, client, req) - - // Call returned funcs in reverse order. - for i := len(post) - 1; i >= 0; i-- { - if fn := post[i]; fn != nil { - fn(resp) - } - } - return resp, err -} - -func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// DecodeResponse decodes the body of res into target. If there is no body, -// target is unchanged. -func DecodeResponse(target interface{}, res *http.Response) error { - if res.StatusCode == http.StatusNoContent { - return nil - } - return json.NewDecoder(res.Body).Decode(target) -} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index ab537676..d1784f1a 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -1,4 +1,4 @@ -// Copyright 2011 Google Inc. All rights reserved. +// Copyright 2011 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,7 +16,7 @@ import ( "net/url" "strings" - "google.golang.org/api/googleapi/internal/uritemplates" + "google.golang.org/api/internal/third_party/uritemplates" ) // ContentTyper is an interface for Readers which know (or would like @@ -54,7 +54,7 @@ const ( // DefaultUploadChunkSize is the default chunk size to use for resumable // uploads if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 + DefaultUploadChunkSize = 16 * 1024 * 1024 // MinUploadChunkSize is the minimum chunk size that can be used for // resumable uploads. All user-specified chunk sizes must be multiple of @@ -69,6 +69,8 @@ type Error struct { // Message is the server response message and is only populated when // explicitly referenced by the JSON server response. Message string `json:"message"` + // Details provide more context to an error. + Details []interface{} `json:"details"` // Body is the raw response returned by the server. // It is often but not always JSON, depending on how the request fails. 
Body string @@ -95,6 +97,16 @@ func (e *Error) Error() string { if e.Message != "" { fmt.Fprintf(&buf, "%s", e.Message) } + if len(e.Details) > 0 { + var detailBuf bytes.Buffer + enc := json.NewEncoder(&detailBuf) + enc.SetIndent("", " ") + if err := enc.Encode(e.Details); err == nil { + fmt.Fprint(&buf, "\nDetails:") + fmt.Fprintf(&buf, "\n%s", detailBuf.String()) + + } + } if len(e.Errors) == 0 { return strings.TrimSpace(buf.String()) } @@ -256,14 +268,22 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions { // "http://www.golang.org/topics/myproject/mytopic". It strips all parent // references (e.g. ../..) as well as anything after the host // (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). +// +// ResolveRelative panics if either basestr or relstr is not able to be parsed. func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) + u, err := url.Parse(basestr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", basestr)) + } afterColonPath := "" if i := strings.IndexRune(relstr, ':'); i > 0 { afterColonPath = relstr[i+1:] relstr = relstr[:i] } - rel, _ := url.Parse(relstr) + rel, err := url.Parse(relstr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", relstr)) + } u = u.ResolveReference(rel) us := u.String() if afterColonPath != "" { @@ -331,7 +351,7 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { } // A Field names a field to be retrieved with a partial response. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance // // Partial responses can dramatically reduce the amount of data that must be sent to your application. // In order to request partial responses, you can specify the full list of fields @@ -348,9 +368,6 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // // svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax -// // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ type Field string diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE deleted file mode 100644 index de9c88cb..00000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index eca1ea25..61720ec2 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -1,9 +1,13 @@ -// Copyright 2012 Google Inc. All rights reserved. +// Copyright 2012 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package transport contains HTTP transports used to make // authenticated API requests. +// +// This package is DEPRECATED. Users should instead use, +// +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( @@ -13,6 +17,8 @@ import ( // APIKey is an HTTP Transport which wraps an underlying transport and // appends an API Key "key" parameter to the URL of outgoing requests. +// +// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. type APIKey struct { // Key is the API Key to set on requests. Key string diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a280e302..fabf74d5 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -1,4 +1,4 @@ -// Copyright 2013 Google Inc. All rights reserved. +// Copyright 2013 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 00000000..fedcce15 --- /dev/null +++ b/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 69b8659f..75e9445e 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -1,16 +1,6 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package internal @@ -100,3 +90,16 @@ func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) } return google.JWTAccessTokenSourceFromJSON(data, audience) } + +// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. +// +// NOTE(cbro): consider promoting this to a field on google.Credentials. +func QuotaProjectFromCreds(cred *google.Credentials) string { + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(cred.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/internal/gensupport/buffer.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/buffer.go rename to vendor/google.golang.org/api/internal/gensupport/buffer.go diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/internal/gensupport/doc.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/doc.go rename to vendor/google.golang.org/api/internal/gensupport/doc.go diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/json.go rename to vendor/google.golang.org/api/internal/gensupport/json.go diff --git a/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go new file mode 100644 index 00000000..13c2f930 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go @@ -0,0 +1,47 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "errors" + "fmt" + "math" +) + +// JSONFloat64 is a float64 that supports proper unmarshaling of special float +// values in JSON, according to +// https://developers.google.com/protocol-buffers/docs/proto3#json. Although +// that is a proto-to-JSON spec, it applies to all Google APIs. +// +// The jsonpb package +// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has +// similar functionality, but only for direct translation from proto messages +// to JSON. 
+type JSONFloat64 float64 + +func (f *JSONFloat64) UnmarshalJSON(data []byte) error { + var ff float64 + if err := json.Unmarshal(data, &ff); err == nil { + *f = JSONFloat64(ff) + return nil + } + var s string + if err := json.Unmarshal(data, &s); err == nil { + switch s { + case "NaN": + ff = math.NaN() + case "Infinity": + ff = math.Inf(1) + case "-Infinity": + ff = math.Inf(-1) + default: + return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) + } + *f = JSONFloat64(ff) + return nil + } + return errors.New("google.golang.org/api/internal: data not float or string") +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go similarity index 98% rename from vendor/google.golang.org/api/gensupport/media.go rename to vendor/google.golang.org/api/internal/gensupport/media.go index 0ef96b3f..0288cc30 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -290,6 +290,9 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + toCleanup := []io.Closer{ + combined, + } if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -299,10 +302,16 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB mimeBoundary = params["boundary"] } r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) + toCleanup = append(toCleanup, r) return r, nil } } - cleanup = func() { combined.Close() } + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } reqHeaders.Set("Content-Type", ctype) body = combined } diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/params.go rename to vendor/google.golang.org/api/internal/gensupport/params.go diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go similarity index 88% rename from vendor/google.golang.org/api/gensupport/resumable.go rename to vendor/google.golang.org/api/internal/gensupport/resumable.go index e67ccd9a..edc87ec2 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -28,6 +28,8 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } + // isRetryable is a platform-specific hook, specified in retryable_linux.go + syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -160,21 +162,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. 
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var shouldRetry = func(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - return err.Temporary() - } - return false - } // There are a couple of cases where it's possible for err and resp to both // be non-nil. However, we expose a simpler contract to our callers: exactly @@ -239,3 +226,33 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err return prepareReturn(resp, err) } } + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, following guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. + if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped + // errors. + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go new file mode 100644 index 00000000..fed998b5 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package gensupport + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. + syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go new file mode 100644 index 00000000..3338c8d1 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "time" +) + +// Hook is the type of a function that is called once before each HTTP request +// that is sent by a generated API. It returns a function that is called after +// the request returns. +// Hooks are not called if the context is nil. +type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) + +var hooks []Hook + +// RegisterHook registers a Hook to be called before each HTTP request by a +// generated API. Hooks are called in the order they are registered. Each +// hook can return a function; if it is non-nil, it is called after the HTTP +// request returns. These functions are called in the reverse order. +// RegisterHook should not be called concurrently with itself or SendRequest. 
+func RegisterHook(h Hook) { + hooks = append(hooks, h) +} + +// SendRequest sends a single HTTP request using the given client. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request. + resp, err := send(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// SendRequestWithRetry sends a single HTTP request using the given client, +// with retries if a retryable error is returned. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request with retry. + resp, err := sendAndRetry(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + var resp *http.Response + var err error + + // Loop to retry the request, up to the context deadline. + var pause time.Duration + bo := backoff() + + for { + select { + case <-ctx.Done(): + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err == nil { + err = ctx.Err() + } + return resp, err + case <-time.After(pause): + } + + resp, err = client.Do(req.WithContext(ctx)) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we can retry the request. 
A retry can only be done if the error + // is retryable and the request body can be re-created using GetBody (this + // will not be possible if the body was unbuffered). + if req.GetBody == nil || !shouldRetry(status, err) { + break + } + var errBody error + req.Body, errBody = req.GetBody() + if errBody != nil { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + return resp, err +} + +// DecodeResponse decodes the body of res into target. If there is no body, +// target is unchanged. +func DecodeResponse(target interface{}, res *http.Response) error { + if res.StatusCode == http.StatusNoContent { + return nil + } + return json.NewDecoder(res.Body).Decode(target) +} diff --git a/vendor/google.golang.org/api/internal/gensupport/version.go b/vendor/google.golang.org/api/internal/gensupport/version.go new file mode 100644 index 00000000..23f6aa24 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/version.go @@ -0,0 +1,53 @@ +// Copyright 2020 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "runtime" + "strings" + "unicode" +) + +// GoVersion returns the Go runtime version. The returned string +// has no whitespace. +func GoVersion() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go deleted file mode 100644 index a4426dcb..00000000 --- a/vendor/google.golang.org/api/internal/pool.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "errors" - - "google.golang.org/grpc/naming" -) - -// PoolResolver provides a fixed list of addresses to load balance between -// and does not provide further updates. -type PoolResolver struct { - poolSize int - dialOpt *DialSettings - ch chan []*naming.Update -} - -// NewPoolResolver returns a PoolResolver -// This is an EXPERIMENTAL API and may be changed or removed in the future. 
-func NewPoolResolver(size int, o *DialSettings) *PoolResolver { - return &PoolResolver{poolSize: size, dialOpt: o} -} - -// Resolve returns a Watcher for the endpoint defined by the DialSettings -// provided to NewPoolResolver. -func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { - if r.dialOpt.Endpoint == "" { - return nil, errors.New("no endpoint configured") - } - addrs := make([]*naming.Update, 0, r.poolSize) - for i := 0; i < r.poolSize; i++ { - addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) - } - r.ch = make(chan []*naming.Update, 1) - r.ch <- addrs - return r, nil -} - -// Next returns a static list of updates on the first call, -// and blocks indefinitely until Close is called on subsequent calls. -func (r *PoolResolver) Next() ([]*naming.Update, error) { - return <-r.ch, nil -} - -// Close releases resources associated with the pool and causes Next to unblock. -func (r *PoolResolver) Close() { - close(r.ch) -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 062301c6..f435519d 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -1,21 +1,12 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package internal supports the options and transport packages. package internal import ( + "crypto/tls" "errors" "net/http" @@ -27,19 +18,26 @@ import ( // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { - Endpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. - CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - NoAuth bool + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} // Google API system parameters. 
For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -79,6 +77,12 @@ func (ds *DialSettings) Validate() error { if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { return errors.New("multiple credential options provided") } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } if ds.HTTPClient != nil && ds.GRPCConn != nil { return errors.New("WithHTTPClient is incompatible with WithGRPCConn") } @@ -91,6 +95,12 @@ func (ds *DialSettings) Validate() error { if ds.HTTPClient != nil && ds.RequestReason != "" { return errors.New("WithHTTPClient is incompatible with RequestReason") } + if ds.HTTPClient != nil && ds.ClientCertSource != nil { + return errors.New("WithHTTPClient is incompatible with WithClientCertSource") + } + if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { + return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") + } return nil } diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE new file mode 100644 index 00000000..7109c6ef --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Joshua Tacoma. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA new file mode 100644 index 00000000..c7f86fcd --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA @@ -0,0 +1,14 @@ +name: "uritemplates" +description: + "Package uritemplates is a level 4 implementation of RFC 6570 (URI " + "Template, http://tools.ietf.org/html/rfc6570)." 
+ +third_party { + url { + type: GIT + value: "https://github.com/jtacoma/uritemplates" + } + version: "0.1" + last_upgrade_date { year: 2014 month: 8 day: 18 } + license_type: NOTICE +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go similarity index 98% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go index 63bf0538..8c27d19d 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go @@ -191,7 +191,7 @@ func parseTerm(term string) (result templateTerm, err error) { err = errors.New("not a valid name: " + result.name) } if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") + err = errors.New("both explode and prefix modifiers on same term") } return result, err } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go similarity index 100% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go index 3c8ea773..1799b5d9 100644 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package iterator provides support for standard Google API iterators. // See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. @@ -82,17 +72,23 @@ type PageInfo struct { // It is not a stable interface. var NewPageInfo = newPageInfo -// If an iterator can support paging, its iterator-creating method should call -// this (via the NewPageInfo variable above). +// newPageInfo creates and returns a PageInfo and a next func. If an iterator can +// support paging, its iterator-creating method should call this. Each time the +// iterator's Next is called, it should call the returned next fn to determine +// whether a next item exists, and if so it should pop an item from the buffer. // -// The fetch, bufLen and takeBuf arguments provide access to the -// iterator's internal slice of buffered items. They behave as described in -// PageInfo, above. +// The fetch, bufLen and takeBuf arguments provide access to the iterator's +// internal slice of buffered items. They behave as described in PageInfo, above. 
// // The return value is the PageInfo.next method bound to the returned PageInfo value. // (Returning it avoids exporting PageInfo.next.) -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { - pi := &PageInfo{ +// +// Note: the returned PageInfo and next fn do not remove items from the buffer. +// It is up to the iterator using these to remove items from the buffer: +// typically by performing a pop in its Next. If items are not removed from the +// buffer, memory may grow unbounded. +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { + pi = &PageInfo{ fetch: fetch, bufLen: bufLen, takeBuf: takeBuf, diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go index 0636a829..d06f918b 100644 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build go1.9 diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go index 74d3a4b5..0ce107a6 100644 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build !go1.9 diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go new file mode 100644 index 00000000..ff5b530c --- /dev/null +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -0,0 +1,40 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internaloption contains options used internally by Google client code. 
+package internaloption + +import ( + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +type defaultEndpointOption string + +func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpoint = string(o) +} + +// WithDefaultEndpoint is an option that indicates the default endpoint. +// +// It should only be used internally by generated clients. +// +// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. +func WithDefaultEndpoint(url string) option.ClientOption { + return defaultEndpointOption(url) +} + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +// +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 0a1c2dba..b7c40d60 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -1,21 +1,12 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package option contains options for Google API clients. package option import ( + "crypto/tls" "net/http" "golang.org/x/oauth2" @@ -124,7 +115,7 @@ func (w withHTTPClient) Apply(o *internal.DialSettings) { } // WithGRPCConn returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option many only be +// connection to use as the basis of communications. This option may only be // used with services that support gRPC as their communication transport. When // used, the WithGRPCConn option takes precedent over all other supplied // options. @@ -152,6 +143,7 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) { // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC // connections that requests will be balanced between. +// // This is an EXPERIMENTAL API and may be changed or removed in the future.
func WithGRPCConnectionPool(size int) ClientOption { return withGRPCConnectionPool(size) @@ -160,8 +152,7 @@ func WithGRPCConnectionPool(size int) ClientOption { type withGRPCConnectionPool int func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) - o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) + o.GRPCConnPoolSize = int(w) } // WithAPIKey returns a ClientOption that specifies an API key to be used @@ -233,3 +224,48 @@ type withRequestReason string func (w withRequestReason) Apply(o *internal.DialSettings) { o.RequestReason = string(w) } + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. +// An example reason would be to bind custom telemetry that overrides the defaults. +func WithTelemetryDisabled() ClientOption { + return withTelemetryDisabled{} +} + +type withTelemetryDisabled struct{} + +func (w withTelemetryDisabled) Apply(o *internal.DialSettings) { + o.TelemetryDisabled = true +} + +// ClientCertSource is a function that returns a TLS client certificate to be used +// when opening TLS connections. +// +// It follows the same semantics as crypto/tls.Config.GetClientCertificate. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// WithClientCertSource returns a ClientOption that specifies a +// callback function for obtaining a TLS client certificate. +// +// This option is used for supporting mTLS authentication, where the +// server validates the client certificate when establishing a connection. +// +// The callback function will be invoked whenever the server requests a +// certificate from the client. Implementations of the callback function +// should try to ensure that a valid certificate can be repeatedly returned +// on demand for the entire life cycle of the transport client. If a nil +// Certificate is returned (i.e. no Certificate can be obtained), an error +// should be returned. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func WithClientCertSource(s ClientCertSource) ClientOption { + return withClientCertSource{s} +} + +type withClientCertSource struct{ s ClientCertSource } + +func (w withClientCertSource) Apply(o *internal.DialSettings) { + o.ClientCertSource = w.s +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 2a0d2746..e78776b2 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -21,12 +21,12 @@ } }, "basePath": "/storage/v1/", - "baseUrl": "https://www.googleapis.com/storage/v1/", + "baseUrl": "https://storage.googleapis.com/storage/v1/", "batchPath": "batch/storage/v1", "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"9eZ1uxVRThTDhLJCZHhqs3eQWz4/m18VxIxuaQHJN-C1B3-yQYvta24\"", + "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/5Ir-e9ddNPcr5skzvRsSnJlvTYg\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -450,6 +450,13 @@ "required": true, "type": "string" }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, "provisionalUserProject": { "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", "location": "query", @@ -1774,7 +1781,7 @@ "type": "string" }, "kmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", "location": "query", "type": "string" }, @@ -1819,6 +1826,11 @@ "required": true, "type": "string" }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, "destinationObject": { "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", "location": "path", @@ -2303,6 +2315,11 @@ "location": "query", "type": "string" }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. 
If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2344,6 +2361,11 @@ "location": "query", "type": "string" }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2875,6 +2897,11 @@ "location": "query", "type": "string" }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2916,6 +2943,11 @@ "location": "query", "type": "string" }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -3054,6 +3086,7 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only" ] }, @@ -3196,8 +3229,8 @@ } } }, - "revision": "20190624", - "rootUrl": "https://www.googleapis.com/", + "revision": "20200611", + "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { "description": "A bucket.", @@ -3289,7 +3322,7 @@ "description": "The bucket's IAM configuration.", "properties": { "bucketPolicyOnly": { - "description": "The bucket's Bucket Policy Only configuration.", + "description": "The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.", "properties": { "enabled": { "description": "If set, access is controlled only by bucket-level or above IAM policies.", @@ -3372,6 +3405,21 @@ "format": "date", "type": "string" }, + "customTimeBefore": { + "description": "A timestamp in RFC 3339 format. This condition is satisfied when the custom time on an object is before this timestamp.", + "format": "date-time", + "type": "string" + }, + "daysSinceCustomTime": { + "description": "Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. 
If no custom timestamp is specified on an object, the condition does not apply.", + "format": "int32", + "type": "integer" + }, + "daysSinceNoncurrentTime": { + "description": "Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.", + "format": "int32", + "type": "integer" + }, "isLive": { "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.", "type": "boolean" @@ -3381,12 +3429,17 @@ "type": "string" }, "matchesStorageClass": { - "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", + "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", "items": { "type": "string" }, "type": "array" }, + "noncurrentTimeBefore": { + "description": "A timestamp in RFC 3339 format. This condition is satisfied when the noncurrent time on an object is before this timestamp. This condition is relevant only for versioned objects.", + "format": "date-time", + "type": "string" + }, "numNewerVersions": { "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", "format": "int32", @@ -3483,7 +3536,7 @@ "type": "string" }, "storageClass": { - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", "type": "string" }, "timeCreated": { @@ -3519,6 +3572,17 @@ } }, "type": "object" + }, + "zoneAffinity": { + "description": "The zone or zones from which the bucket is intended to use zonal quota. Requests for data from outside the specified affinities are still allowed but won't be able to use zonal quota. 
The zone or zones need to be within the bucket location otherwise the requests will fail with a 400 Bad Request response.", + "items": { + "type": "string" + }, + "type": "array" + }, + "zoneSeparation": { + "description": "If set, objects placed in this bucket are required to be separated by disaster domain.", + "type": "boolean" } }, "type": "object" @@ -3990,6 +4054,11 @@ "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", "type": "string" }, + "customTime": { + "description": "A timestamp in RFC 3339 format specified by the user for an object.", + "format": "date-time", + "type": "string" + }, "customerEncryption": { "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", "properties": { @@ -4305,6 +4374,11 @@ "resourceId": { "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", "type": "string" + }, + "version": { + "description": "The IAM policy format version.", + "format": "int32", + "type": "integer" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 2eff6741..dc51a22e 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -55,9 +55,10 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" htransport "google.golang.org/api/transport/http" ) @@ -74,11 +75,12 @@ var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint const apiId = "storage:v1" const apiName = "storage" const apiVersion = "v1" -const basePath = "https://www.googleapis.com/storage/v1/" +const basePath = "https://storage.googleapis.com/storage/v1/" // OAuth2 scopes used by this API. const ( @@ -109,6 +111,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -371,9 +374,9 @@ type Bucket struct { // storageClass is specified for a newly-created object. This defines // how objects in the bucket are stored and determines the SLA and the // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, - // NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value - // is not specified when the bucket is created, it will default to - // STANDARD. 
For more information, see storage classes. + // NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If + // this value is not specified when the bucket is created, it will + // default to STANDARD. For more information, see storage classes. StorageClass string `json:"storageClass,omitempty"` // TimeCreated: The creation time of the bucket in RFC 3339 format. @@ -390,6 +393,17 @@ type Bucket struct { // Static Website Examples for more information. Website *BucketWebsite `json:"website,omitempty"` + // ZoneAffinity: The zone or zones from which the bucket is intended to + // use zonal quota. Requests for data from outside the specified + // affinities are still allowed but won't be able to use zonal quota. + // The zone or zones need to be within the bucket location otherwise the + // requests will fail with a 400 Bad Request response. + ZoneAffinity []string `json:"zoneAffinity,omitempty"` + + // ZoneSeparation: If set, objects placed in this bucket are required to + // be separated by disaster domain. + ZoneSeparation bool `json:"zoneSeparation,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -522,7 +536,12 @@ func (s *BucketEncryption) MarshalJSON() ([]byte, error) { // BucketIamConfiguration: The bucket's IAM configuration. type BucketIamConfiguration struct { - // BucketPolicyOnly: The bucket's Bucket Policy Only configuration. + // BucketPolicyOnly: The bucket's uniform bucket-level access + // configuration. The feature was formerly known as Bucket Policy Only. + // For backward compatibility, this field will be populated with + // identical information as the uniformBucketLevelAccess field. We + // recommend using the uniformBucketLevelAccess field to enable and + // disable the feature. BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` // UniformBucketLevelAccess: The bucket's uniform bucket-level access @@ -553,8 +572,12 @@ func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BucketIamConfigurationBucketPolicyOnly: The bucket's Bucket Policy -// Only configuration. +// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform +// bucket-level access configuration. The feature was formerly known as +// Bucket Policy Only. For backward compatibility, this field will be +// populated with identical information as the uniformBucketLevelAccess +// field. We recommend using the uniformBucketLevelAccess field to +// enable and disable the feature. type BucketIamConfigurationBucketPolicyOnly struct { // Enabled: If set, access is controlled only by bucket-level or above // IAM policies. @@ -733,6 +756,24 @@ type BucketLifecycleRuleCondition struct { // is created before midnight of the specified date in UTC. CreatedBefore string `json:"createdBefore,omitempty"` + // CustomTimeBefore: A timestamp in RFC 3339 format. This condition is + // satisfied when the custom time on an object is before this timestamp. + CustomTimeBefore string `json:"customTimeBefore,omitempty"` + + // DaysSinceCustomTime: Number of days elapsed since the user-specified + // timestamp set on an object. The condition is satisfied if the days + // elapsed is at least this number. If no custom timestamp is specified + // on an object, the condition does not apply. 
+ DaysSinceCustomTime int64 `json:"daysSinceCustomTime,omitempty"` + + // DaysSinceNoncurrentTime: Number of days elapsed since the noncurrent + // timestamp of an object. The condition is satisfied if the days + // elapsed is at least this number. This condition is relevant only for + // versioned objects. The value of the field must be a nonnegative + // integer. If it's zero, the object version will become eligible for + // Lifecycle action as soon as it becomes noncurrent. + DaysSinceNoncurrentTime int64 `json:"daysSinceNoncurrentTime,omitempty"` + // IsLive: Relevant only for versioned objects. If the value is true, // this condition matches live objects; if the value is false, it // matches archived objects. @@ -748,10 +789,15 @@ type BucketLifecycleRuleCondition struct { // MatchesStorageClass: Objects having any of the storage classes // specified by this condition will be matched. Values include - // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and + // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and // DURABLE_REDUCED_AVAILABILITY. MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + // NoncurrentTimeBefore: A timestamp in RFC 3339 format. This condition + // is satisfied when the noncurrent time on an object is before this + // timestamp. This condition is relevant only for versioned objects. + NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` + // NumNewerVersions: Relevant only for versioned objects. If the value // is N, this condition is satisfied when there are at least N versions // (including the live version) newer than this version of the object. @@ -1650,6 +1696,10 @@ type Object struct { // Practices. Crc32c string `json:"crc32c,omitempty"` + // CustomTime: A timestamp in RFC 3339 format specified by the user for + // an object. + CustomTime string `json:"customTime,omitempty"` + // CustomerEncryption: Metadata of customer-supplied encryption key, if // the object is encrypted by such a key. CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` @@ -2068,6 +2118,9 @@ type Policy struct { // generation can be denoted with #0. This field is ignored on input. ResourceId string `json:"resourceId,omitempty"` + // Version: The IAM policy format version. + Version int64 `json:"version,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -2389,7 +2442,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2537,7 +2590,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2704,7 +2757,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2877,7 +2930,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3038,7 +3091,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3212,7 +3265,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3398,7 +3451,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3577,7 +3630,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3721,6 +3774,16 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. 
If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // ProvisionalUserProject sets the optional parameter // "provisionalUserProject": The project to be billed for this request // if the target bucket is requester-pays bucket. @@ -3773,7 +3836,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3848,6 +3911,13 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, // "provisionalUserProject": { // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", // "location": "query", @@ -3983,7 +4053,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4240,7 +4310,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4450,7 +4520,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4685,7 +4755,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4914,7 +4984,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range 
c.header_ { reqHeaders[k] = v } @@ -5089,7 +5159,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5329,7 +5399,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5541,7 +5611,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5658,7 +5728,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5806,7 +5876,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5974,7 +6044,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6164,7 +6234,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6337,7 +6407,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6511,7 +6581,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6683,7 +6753,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6831,7 +6901,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7001,7 +7071,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7176,7 +7246,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7349,7 +7419,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7521,7 +7591,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7712,7 +7782,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7909,7 +7979,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8094,7 +8164,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c 
*ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8292,7 +8362,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8477,11 +8547,9 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object -// metadata's kms_key_name value, if any. +// KmsKeyName sets the optional parameter "kmsKeyName": Not currently +// supported. Specifying the parameter causes the request to fail with +// status code 400 - Bad Request. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c @@ -8529,7 +8597,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8648,7 +8716,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", // "location": "query", // "type": "string" // }, @@ -8705,6 +8773,17 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat return c } +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. 
@@ -8861,7 +8940,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8944,6 +9023,11 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "required": true, // "type": "string" // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", // "location": "path", @@ -9184,7 +9268,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9417,7 +9501,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9667,7 +9751,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9984,7 +10068,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9999,7 +10083,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") if c.mediaInfo_ != nil { - urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } if body == nil { @@ -10018,7 +10102,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. 
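(Illustrative usage, not part of the vendored diff.) A minimal sketch of how a consumer of this updated `storage/v1` client could exercise the newly added `DestinationKmsKeyName` option on `storage.objects.copy`; the bucket, object, and KMS key names below are placeholders, and credentials are assumed to come from Application Default Credentials.

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials by default.
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Copy an object and encrypt the destination with a Cloud KMS key via the
	// DestinationKmsKeyName parameter added in this vendored update.
	// All bucket/object/key names are placeholder values.
	obj, err := svc.Objects.
		Copy("example-src-bucket", "example.txt", "example-dst-bucket", "example-copy.txt", &storage.Object{}).
		DestinationKmsKeyName("projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("copied:", obj.Name, "kms key:", obj.KmsKeyName)
}
```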
@@ -10234,6 +10318,15 @@ func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { return c } +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one // instance of delimiter will have their metadata included in items in @@ -10287,6 +10380,15 @@ func (c *ObjectsListCall) ProvisionalUserProject(provisionalUserProject string) return c } +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { @@ -10339,7 +10441,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10419,6 +10521,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "includeTrailingDelimiter": { // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", // "location": "query", @@ -10460,6 +10567,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -10646,7 +10758,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11038,7 +11150,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11341,7 +11453,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11541,7 +11653,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11802,7 +11914,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12007,6 +12119,15 @@ func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { return c } +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one // instance of delimiter will have their metadata included in items in @@ -12060,6 +12181,15 @@ func (c *ObjectsWatchAllCall) ProvisionalUserProject(provisionalUserProject stri return c } +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { @@ -12102,7 +12232,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12184,6 +12314,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "location": "query", // "type": "string" // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "includeTrailingDelimiter": { // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", // "location": "query", @@ -12225,6 +12360,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "location": "query", // "type": "string" // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -12308,7 +12448,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12458,7 +12598,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12593,7 +12733,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12689,6 +12829,7 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only" // ] // } @@ -12792,7 +12933,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12987,7 +13128,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13164,7 +13305,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go new file mode 100644 index 00000000..c03af65f --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -0,0 +1,110 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +var ( + defaultSourceOnce sync.Once + defaultSource Source + defaultSourceErr error +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// DefaultSource returns a certificate source that execs the command specified +// in the file at ~/.secureConnect/context_aware_metadata.json +// +// If that file does not exist, a nil source is returned. +func DefaultSource() (Source, error) { + defaultSourceOnce.Do(func() { + defaultSource, defaultSourceErr = newSecureConnectSource() + }) + return defaultSource, defaultSourceErr +} + +type secureConnectSource struct { + metadata secureConnectMetadata +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// newSecureConnectSource creates a secureConnectSource by reading the well-known file. +func newSecureConnectSource() (Source, error) { + user, err := user.Current() + if err != nil { + // Ignore. + return nil, nil + } + filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) + file, err := ioutil.ReadFile(filename) + if os.IsNotExist(err) { + // Ignore. 
+ return nil, nil + } + if err != nil { + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + // TODO(cbro): consider caching valid certificates rather than exec'ing every time. + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + // TODO(cbro): read stderr for error message? Might contain sensitive info. + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_go113.go new file mode 100644 index 00000000..924f2704 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_go113.go @@ -0,0 +1,20 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// It returns nil if the RoundTripper can't be cloned or coerced to +// *http.Transport. +func clonedTransport(rt http.RoundTripper) *http.Transport { + t, ok := rt.(*http.Transport) + if !ok { + return nil + } + return t.Clone() +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go new file mode 100644 index 00000000..3cb16c6c --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// For versions of Go <1.13, this is not supported, so return nil. +func clonedTransport(rt http.RoundTripper) *http.Transport { + return nil +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index c0d8bf20..44503014 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -1,16 +1,6 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package http supports network connections to HTTP servers. // This package is not intended for use by end developers. Use the @@ -19,17 +9,30 @@ package http import ( "context" + "crypto/tls" "errors" + "net" "net/http" + "net/url" + "os" + "strings" + "time" "go.opencensus.io/plugin/ochttp" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" "google.golang.org/api/option" + "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" ) +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" +) + // NewClient returns an HTTP client for use communicating with a Google cloud // service, configured with the given ClientOptions. It also returns the endpoint // for the service as specified in the options. @@ -38,15 +41,23 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? if settings.HTTPClient != nil { - return settings.HTTPClient, settings.Endpoint, nil + return settings.HTTPClient, endpoint, nil } - trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) if err != nil { return nil, "", err } - return &http.Client{Transport: trans}, settings.Endpoint, nil + return &http.Client{Transport: trans}, endpoint, nil } // NewTransport creates an http.RoundTripper for use communicating with a Google @@ -63,14 +74,14 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl } func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { - trans := base - trans = parameterTransport{ - base: trans, + paramTransport := ¶meterTransport{ + base: base, userAgent: settings.UserAgent, quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } - trans = addOCTransport(trans) + var trans http.RoundTripper = paramTransport + trans = addOCTransport(trans, settings) switch { case settings.NoAuth: // Do nothing. 
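(Illustrative, not part of the vendored diff.) The new `transport/cert` package introduced above exposes `DefaultSource` for Endpoint Verification client certificates; a hedged sketch of wiring it into a `tls.Config`, assuming only the `google.golang.org/api/transport/cert` import path shown in the diff.

```go
package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/api/transport/cert"
)

func main() {
	// DefaultSource reads ~/.secureConnect/context_aware_metadata.json and
	// returns a nil Source (with nil error) when that file does not exist.
	src, err := cert.DefaultSource()
	if err != nil {
		log.Fatalf("loading secureConnect metadata: %v", err)
	}

	cfg := &tls.Config{}
	if src != nil {
		// A cert.Source has the same signature as GetClientCertificate, so the
		// exec-based certificate provider is consulted during the TLS handshake.
		cfg.GetClientCertificate = src
	}
	_ = cfg // use cfg in an http.Transport or dialer as needed
}
```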
@@ -84,9 +95,17 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + if paramTransport.quotaProject == "" { + paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) + } + + ts := creds.TokenSource + if settings.TokenSource != nil { + ts = settings.TokenSource + } trans = &oauth2.Transport{ Base: trans, - Source: creds.TokenSource, + Source: ts, } } return trans, nil @@ -114,21 +133,20 @@ type parameterTransport struct { base http.RoundTripper } -func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { +func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base if rt == nil { return nil, errors.New("transport: no Transport specified") } - if t.userAgent == "" { - return rt.RoundTrip(req) - } newReq := *req newReq.Header = make(http.Header) for k, vv := range req.Header { newReq.Header[k] = vv } - // TODO(cbro): append to existing User-Agent header? - newReq.Header.Set("User-Agent", t.userAgent) + if t.userAgent != "" { + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + } // Attach system parameters into the header if t.quotaProject != "" { @@ -145,17 +163,142 @@ func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) var appengineUrlfetchHook func(context.Context) http.RoundTripper // defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. -func defaultBaseTransport(ctx context.Context) http.RoundTripper { +// On App Engine, this is urlfetch.Transport. +// Otherwise, use a default transport, taking most defaults from +// http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. +func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } - return http.DefaultTransport + + // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, + // which is increased due to reported performance issues under load in the GCS + // client. Transport.Clone is only available in Go 1.13 and up. 
+ trans := clonedTransport(http.DefaultTransport) + if trans == nil { + trans = fallbackBaseTransport() + } + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + + return trans +} + +// fallbackBaseTransport is used in google.protobuf.MethodOptions + 1, // 1: google.api.http:type_name -> google.api.HttpRule + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor_c591c5aa9fb79aab) } - -var fileDescriptor_c591c5aa9fb79aab = []byte{ - // 208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, - 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, - 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, - 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, - 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, - 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, - 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, - 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, - 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, - 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +func init() { file_google_api_annotations_proto_init() } +func file_google_api_annotations_proto_init() { + if File_google_api_annotations_proto != nil { + return + } + file_google_api_http_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_annotations_proto_goTypes, + DependencyIndexes: file_google_api_annotations_proto_depIdxs, + ExtensionInfos: file_google_api_annotations_proto_extTypes, + }.Build() + File_google_api_annotations_proto = out.File + file_google_api_annotations_proto_rawDesc = nil + file_google_api_annotations_proto_goTypes = nil + file_google_api_annotations_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 867fc0c3..3832df0b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,79 +1,219 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/client.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 -var E_MethodSignature = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MethodOptions)(nil), - ExtensionType: ([]string)(nil), - Field: 1051, - Name: "google.api.method_signature", - Tag: "bytes,1051,rep,name=method_signature", - Filename: "google/api/client.proto", +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", + }, } -var E_DefaultHost = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1049, - Name: "google.api.default_host", - Tag: "bytes,1049,opt,name=default_host", - Filename: "google/api/client.proto", -} +// Extension fields to descriptor.MethodOptions. +var ( + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). 
Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + // + // repeated string method_signature = 1051; + E_MethodSignature = &file_google_api_client_proto_extTypes[0] +) -var E_OauthScopes = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1050, - Name: "google.api.oauth_scopes", - Tag: "bytes,1050,opt,name=oauth_scopes", - Filename: "google/api/client.proto", -} +// Extension fields to descriptor.ServiceOptions. +var ( + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + // + // optional string default_host = 1049; + E_DefaultHost = &file_google_api_client_proto_extTypes[1] + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } + // + // optional string oauth_scopes = 1050; + E_OauthScopes = &file_google_api_client_proto_extTypes[2] +) + +var File_google_api_client_proto protoreflect.FileDescriptor -func init() { - proto.RegisterExtension(E_MethodSignature) - proto.RegisterExtension(E_DefaultHost) - proto.RegisterExtension(E_OauthScopes) +var file_google_api_client_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, + 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, + 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/api/client.proto", fileDescriptor_78f2c6f7c3a942c1) } +var file_google_api_client_proto_goTypes = []interface{}{ + (*descriptor.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*descriptor.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the 
sub-list for extension type_name + 0, // [0:3] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} -var fileDescriptor_78f2c6f7c3a942c1 = []byte{ - // 262 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x3f, 0x4f, 0xc3, 0x30, - 0x10, 0xc5, 0x55, 0x40, 0xa8, 0x75, 0x11, 0xa0, 0x2c, 0x20, 0x06, 0xc8, 0xd8, 0xc9, 0x1e, 0xd8, - 0xca, 0xd4, 0x76, 0xe0, 0x8f, 0x84, 0x88, 0x9a, 0x8d, 0x25, 0x72, 0x9d, 0xab, 0x63, 0x29, 0xf5, - 0x59, 0xf6, 0x85, 0xef, 0x02, 0x6c, 0x7c, 0x52, 0x54, 0xc7, 0x11, 0x48, 0x0c, 0x6c, 0x27, 0xbd, - 0xf7, 0xfb, 0x9d, 0xf4, 0xd8, 0x85, 0x46, 0xd4, 0x2d, 0x08, 0xe9, 0x8c, 0x50, 0xad, 0x01, 0x4b, - 0xdc, 0x79, 0x24, 0xcc, 0x58, 0x1f, 0x70, 0xe9, 0xcc, 0x55, 0x9e, 0x4a, 0x31, 0xd9, 0x74, 0x5b, - 0x51, 0x43, 0x50, 0xde, 0x38, 0x42, 0xdf, 0xb7, 0xe7, 0x4f, 0xec, 0x7c, 0x07, 0xd4, 0x60, 0x5d, - 0x05, 0xa3, 0xad, 0xa4, 0xce, 0x43, 0x76, 0xcd, 0x93, 0x62, 0xc0, 0xf8, 0x73, 0xac, 0xbc, 0x38, - 0x32, 0x68, 0xc3, 0xe5, 0xe7, 0x38, 0x3f, 0x9c, 0x4d, 0xd6, 0x67, 0x3d, 0x58, 0x0e, 0xdc, 0x7c, - 0xc5, 0x4e, 0x6a, 0xd8, 0xca, 0xae, 0xa5, 0xaa, 0xc1, 0x40, 0xd9, 0xcd, 0x1f, 0x4f, 0x09, 0xfe, - 0xcd, 0x28, 0x18, 0x44, 0xef, 0xe3, 0x7c, 0x34, 0x9b, 0xac, 0xa7, 0x89, 0x7a, 0xc0, 0x40, 0x7b, - 0x09, 0xca, 0x8e, 0x9a, 0x2a, 0x28, 0x74, 0x10, 0xfe, 0x97, 0x7c, 0x24, 0x49, 0xa4, 0xca, 0x08, - 0x2d, 0x0d, 0x3b, 0x55, 0xb8, 0xe3, 0x3f, 0x4b, 0x2c, 0xa7, 0xab, 0xb8, 0x51, 0xb1, 0x97, 0x14, - 0xa3, 0xd7, 0x45, 0x8a, 0x34, 0xb6, 0xd2, 0x6a, 0x8e, 0x5e, 0x0b, 0x0d, 0x36, 0xbe, 0x10, 0x7d, - 0x24, 0x9d, 0x09, 0x71, 0x5c, 0x69, 0x2d, 0x92, 0x8c, 0xbf, 0xee, 0x7e, 0xdd, 0x5f, 0x07, 0x47, - 0xf7, 0x8b, 0xe2, 0x71, 0x73, 0x1c, 0xa1, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xc2, - 0xcf, 0x71, 0x90, 0x01, 0x00, 0x00, +func init() { file_google_api_client_proto_init() } +func file_google_api_client_proto_init() { + if File_google_api_client_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_client_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_client_proto_goTypes, + DependencyIndexes: file_google_api_client_proto_depIdxs, + ExtensionInfos: file_google_api_client_proto_extTypes, + }.Build() + File_google_api_client_proto = out.File + file_google_api_client_proto_rawDesc = nil + file_google_api_client_proto_goTypes = nil + file_google_api_client_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 31f87dd0..0038b20e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,26 +1,45 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/field_behavior.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // An indicator of the behavior of a given field (for example, that a field // is required in requests, or given as output but ignored as input). @@ -56,67 +75,164 @@ const ( FieldBehavior_IMMUTABLE FieldBehavior = 5 ) -var FieldBehavior_name = map[int32]string{ - 0: "FIELD_BEHAVIOR_UNSPECIFIED", - 1: "OPTIONAL", - 2: "REQUIRED", - 3: "OUTPUT_ONLY", - 4: "INPUT_ONLY", - 5: "IMMUTABLE", -} +// Enum value maps for FieldBehavior. +var ( + FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + } +) -var FieldBehavior_value = map[string]int32{ - "FIELD_BEHAVIOR_UNSPECIFIED": 0, - "OPTIONAL": 1, - "REQUIRED": 2, - "OUTPUT_ONLY": 3, - "INPUT_ONLY": 4, - "IMMUTABLE": 5, +func (x FieldBehavior) Enum() *FieldBehavior { + p := new(FieldBehavior) + *p = x + return p } func (x FieldBehavior) String() string { - return proto.EnumName(FieldBehavior_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() +} + +func (FieldBehavior) Type() protoreflect.EnumType { + return &file_google_api_field_behavior_proto_enumTypes[0] } +func (x FieldBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldBehavior.Descriptor instead. 
func (FieldBehavior) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4648f18fd5079967, []int{0} + return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} +} + +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", + }, +} + +// Extension fields to descriptor.FieldOptions. +var ( + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + // + // repeated google.api.FieldBehavior field_behavior = 1052; + E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] +) + +var File_google_api_field_behavior_proto protoreflect.FileDescriptor + +var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, + 0x7b, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, + 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, + 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x3a, 0x60, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, + 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var E_FieldBehavior = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: ([]FieldBehavior)(nil), - Field: 1052, - Name: "google.api.field_behavior", - Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", - Filename: "google/api/field_behavior.proto", +var ( + file_google_api_field_behavior_proto_rawDescOnce sync.Once + file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc +) + +func file_google_api_field_behavior_proto_rawDescGZIP() []byte { + file_google_api_field_behavior_proto_rawDescOnce.Do(func() { + file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) + }) + return file_google_api_field_behavior_proto_rawDescData } -func init() { - proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value) - proto.RegisterExtension(E_FieldBehavior) +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_behavior_proto_goTypes = []interface{}{ + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptor.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_google_api_field_behavior_proto_depIdxs = []int32{ + 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions + 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_4648f18fd5079967) } - -var fileDescriptor_4648f18fd5079967 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4f, 0xb3, 0x30, - 0x1c, 0xc7, 0x9f, 0xfd, 0x79, 0xcc, 0xac, 0x0e, 0x49, 0x4f, 0xba, 0x44, 0xdd, 0xd1, 0x78, 0x28, - 0x89, 0xde, 0xf4, 0x04, 0xae, 0xd3, 0x26, 0x8c, 0x56, 0x04, 0x13, 0xbd, 0x60, 0xb7, 0xb1, 0xda, - 0x64, 0xd2, 0x06, 0xd0, 0x8b, 0x6f, 0xc5, 0x93, 0xaf, 0xd4, 0xd0, 0x31, 0x85, 0x5b, 0xbf, 0xf9, - 0x7d, 0xfa, 0xeb, 0xe7, 0x5b, 0x70, 0x2a, 0x94, 0x12, 0xeb, 0xd4, 0xe1, 0x5a, 0x3a, 0x2b, 0x99, - 0xae, 0x97, 0xc9, 0x3c, 0x7d, 0xe5, 0x1f, 0x52, 0xe5, 0x48, 0xe7, 0xaa, 0x54, 0x10, 0x6c, 0x00, - 0xc4, 0xb5, 0x1c, 0x8d, 0x6b, 0xd8, 0x4c, 0xe6, 0xef, 0x2b, 0x67, 0x99, 0x16, 0x8b, 0x5c, 0xea, - 0x72, 0x4b, 0x9f, 0x7f, 0x82, 0xe1, 0xb4, 0xda, 0xe2, 0xd5, 0x4b, 0xe0, 0x09, 0x18, 0x4d, 0x09, - 0xf6, 0x27, 0x89, 0x87, 0xef, 0xdc, 0x47, 0x42, 0xc3, 0x24, 0x0e, 0x1e, 0x18, 0xbe, 0x21, 0x53, - 0x82, 0x27, 0xf6, 0x3f, 0xb8, 0x0f, 0x06, 0x94, 0x45, 0x84, 0x06, 0xae, 0x6f, 0x77, 0xaa, 0x14, - 0xe2, 0xfb, 0x98, 0x84, 0x78, 0x62, 0x77, 0xe1, 0x01, 0xd8, 0xa3, 0x71, 0xc4, 0xe2, 0x28, 0xa1, - 0x81, 0xff, 0x64, 0xf7, 0xa0, 0x05, 0x00, 0x09, 0x7e, 0x73, 0x1f, 0x0e, 0xc1, 0x2e, 0x99, 0xcd, - 0xe2, 0xc8, 0xf5, 0x7c, 0x6c, 0xff, 0xbf, 0x7a, 0x01, 0x56, 
0xbb, 0x02, 0x3c, 0x46, 0xb5, 0xfd, - 0xd6, 0x18, 0x19, 0x3b, 0xaa, 0x4b, 0xa9, 0xb2, 0xe2, 0xf0, 0x6b, 0x30, 0xee, 0x9d, 0x59, 0x17, - 0x47, 0xe8, 0xaf, 0x23, 0x6a, 0xe9, 0x87, 0xc3, 0x55, 0x33, 0x7a, 0x1a, 0x58, 0x0b, 0xf5, 0xd6, - 0xc0, 0x3d, 0xd8, 0xe2, 0x59, 0xf5, 0x0c, 0xeb, 0x3c, 0xbb, 0x35, 0x21, 0xd4, 0x9a, 0x67, 0x02, - 0xa9, 0x5c, 0x38, 0x22, 0xcd, 0x8c, 0x84, 0xb3, 0x19, 0x71, 0x2d, 0x0b, 0xf3, 0xe9, 0x3c, 0xcb, - 0x54, 0xc9, 0x8d, 0xcf, 0x75, 0xe3, 0xfc, 0xdd, 0xed, 0xdf, 0xba, 0x8c, 0xcc, 0x77, 0xcc, 0xa5, - 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x94, 0x57, 0x94, 0xa8, 0x01, 0x00, 0x00, +func init() { file_google_api_field_behavior_proto_init() } +func file_google_api_field_behavior_proto_init() { + if File_google_api_field_behavior_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_behavior_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_behavior_proto_goTypes, + DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, + EnumInfos: file_google_api_field_behavior_proto_enumTypes, + ExtensionInfos: file_google_api_field_behavior_proto_extTypes, + }.Build() + File_google_api_field_behavior_proto = out.File + file_google_api_field_behavior_proto_rawDesc = nil + file_google_api_field_behavior_proto_goTypes = nil + file_google_api_field_behavior_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index a6387037..955bc56a 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,30 +1,53 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/http.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. type Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A list of HTTP configuration rules that apply to individual API methods. // // **NOTE:** All service configuration rules follow "last one wins" order. @@ -35,47 +58,51 @@ type Http struct { // // The default behavior is to not decode RFC 6570 reserved characters in multi // segment matches. - FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` } -func (m *Http) Reset() { *m = Http{} } -func (m *Http) String() string { return proto.CompactTextString(m) } -func (*Http) ProtoMessage() {} -func (*Http) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9994be407cdcc9, []int{0} +func (x *Http) Reset() { + *x = Http{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Http) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Http.Unmarshal(m, b) -} -func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Http.Marshal(b, m, deterministic) +func (x *Http) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Http) XXX_Merge(src proto.Message) { - xxx_messageInfo_Http.Merge(m, src) -} -func (m *Http) XXX_Size() int { - return xxx_messageInfo_Http.Size(m) -} -func (m *Http) XXX_DiscardUnknown() { - xxx_messageInfo_Http.DiscardUnknown(m) + +func (*Http) ProtoMessage() {} + +func (x *Http) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Http proto.InternalMessageInfo +// Deprecated: Use Http.ProtoReflect.Descriptor instead. +func (*Http) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{0} +} -func (m *Http) GetRules() []*HttpRule { - if m != nil { - return m.Rules +func (x *Http) GetRules() []*HttpRule { + if x != nil { + return x.Rules } return nil } -func (m *Http) GetFullyDecodeReservedExpansion() bool { - if m != nil { - return m.FullyDecodeReservedExpansion +func (x *Http) GetFullyDecodeReservedExpansion() bool { + if x != nil { + return x.FullyDecodeReservedExpansion } return false } @@ -350,6 +377,10 @@ func (m *Http) GetFullyDecodeReservedExpansion() bool { // the request or response body to a repeated field. However, some gRPC // Transcoding implementations may not support this feature. 
type HttpRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Selects a method to which this rule applies. // // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. @@ -358,7 +389,7 @@ type HttpRule struct { // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. // - // Types that are valid to be assigned to Pattern: + // Types that are assignable to Pattern: // *HttpRule_Get // *HttpRule_Put // *HttpRule_Post @@ -383,84 +414,48 @@ type HttpRule struct { // Additional HTTP bindings for the selector. Nested bindings must // not contain an `additional_bindings` field themselves (that is, // the nesting may only be one level deep). - AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` } -func (m *HttpRule) Reset() { *m = HttpRule{} } -func (m *HttpRule) String() string { return proto.CompactTextString(m) } -func (*HttpRule) ProtoMessage() {} -func (*HttpRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9994be407cdcc9, []int{1} -} - -func (m *HttpRule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HttpRule.Unmarshal(m, b) -} -func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) -} -func (m *HttpRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_HttpRule.Merge(m, src) -} -func (m *HttpRule) XXX_Size() int { - return xxx_messageInfo_HttpRule.Size(m) -} -func (m *HttpRule) XXX_DiscardUnknown() { - xxx_messageInfo_HttpRule.DiscardUnknown(m) -} - -var xxx_messageInfo_HttpRule proto.InternalMessageInfo - -func (m *HttpRule) GetSelector() string { - if m != nil { - return m.Selector +func (x *HttpRule) Reset() { + *x = HttpRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type isHttpRule_Pattern interface { - isHttpRule_Pattern() +func (x *HttpRule) String() string { + return protoimpl.X.MessageStringOf(x) } -type HttpRule_Get struct { - Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` -} - -type HttpRule_Put struct { - Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` -} - -type HttpRule_Post struct { - Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` -} +func (*HttpRule) ProtoMessage() {} -type HttpRule_Delete struct { - Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +func (x *HttpRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type HttpRule_Patch struct { - Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. 
+func (*HttpRule) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{1} } -type HttpRule_Custom struct { - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +func (x *HttpRule) GetSelector() string { + if x != nil { + return x.Selector + } + return "" } -func (*HttpRule_Get) isHttpRule_Pattern() {} - -func (*HttpRule_Put) isHttpRule_Pattern() {} - -func (*HttpRule_Post) isHttpRule_Pattern() {} - -func (*HttpRule_Delete) isHttpRule_Pattern() {} - -func (*HttpRule_Patch) isHttpRule_Pattern() {} - -func (*HttpRule_Custom) isHttpRule_Pattern() {} - func (m *HttpRule) GetPattern() isHttpRule_Pattern { if m != nil { return m.Pattern @@ -468,166 +463,321 @@ func (m *HttpRule) GetPattern() isHttpRule_Pattern { return nil } -func (m *HttpRule) GetGet() string { - if x, ok := m.GetPattern().(*HttpRule_Get); ok { +func (x *HttpRule) GetGet() string { + if x, ok := x.GetPattern().(*HttpRule_Get); ok { return x.Get } return "" } -func (m *HttpRule) GetPut() string { - if x, ok := m.GetPattern().(*HttpRule_Put); ok { +func (x *HttpRule) GetPut() string { + if x, ok := x.GetPattern().(*HttpRule_Put); ok { return x.Put } return "" } -func (m *HttpRule) GetPost() string { - if x, ok := m.GetPattern().(*HttpRule_Post); ok { +func (x *HttpRule) GetPost() string { + if x, ok := x.GetPattern().(*HttpRule_Post); ok { return x.Post } return "" } -func (m *HttpRule) GetDelete() string { - if x, ok := m.GetPattern().(*HttpRule_Delete); ok { +func (x *HttpRule) GetDelete() string { + if x, ok := x.GetPattern().(*HttpRule_Delete); ok { return x.Delete } return "" } -func (m *HttpRule) GetPatch() string { - if x, ok := m.GetPattern().(*HttpRule_Patch); ok { +func (x *HttpRule) GetPatch() string { + if x, ok := x.GetPattern().(*HttpRule_Patch); ok { return x.Patch } return "" } -func (m *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := m.GetPattern().(*HttpRule_Custom); ok { +func (x *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := x.GetPattern().(*HttpRule_Custom); ok { return x.Custom } return nil } -func (m *HttpRule) GetBody() string { - if m != nil { - return m.Body +func (x *HttpRule) GetBody() string { + if x != nil { + return x.Body } return "" } -func (m *HttpRule) GetResponseBody() string { - if m != nil { - return m.ResponseBody +func (x *HttpRule) GetResponseBody() string { + if x != nil { + return x.ResponseBody } return "" } -func (m *HttpRule) GetAdditionalBindings() []*HttpRule { - if m != nil { - return m.AdditionalBindings +func (x *HttpRule) GetAdditionalBindings() []*HttpRule { + if x != nil { + return x.AdditionalBindings } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*HttpRule) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } +type isHttpRule_Pattern interface { + isHttpRule_Pattern() } +type HttpRule_Get struct { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + // Maps to HTTP PUT. Used for replacing a resource. + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + // Maps to HTTP POST. Used for creating a resource or performing an action. 
+ Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + // Maps to HTTP DELETE. Used for deleting a resource. + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + // Maps to HTTP PATCH. Used for updating a resource. + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + // A custom pattern is used for defining custom HTTP verb. type CustomHttpPattern struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of this custom HTTP verb. Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` } -func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } -func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } -func (*CustomHttpPattern) ProtoMessage() {} -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9994be407cdcc9, []int{2} +func (x *CustomHttpPattern) Reset() { + *x = CustomHttpPattern{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b) +func (x *CustomHttpPattern) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) -} -func (m *CustomHttpPattern) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomHttpPattern.Merge(m, src) -} -func (m *CustomHttpPattern) XXX_Size() int { - return xxx_messageInfo_CustomHttpPattern.Size(m) -} -func (m *CustomHttpPattern) XXX_DiscardUnknown() { - xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) + +func (*CustomHttpPattern) ProtoMessage() {} + +func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo +// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. 
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{2} +} -func (m *CustomHttpPattern) GetKind() string { - if m != nil { - return m.Kind +func (x *CustomHttpPattern) GetKind() string { + if x != nil { + return x.Kind } return "" } -func (m *CustomHttpPattern) GetPath() string { - if m != nil { - return m.Path +func (x *CustomHttpPattern) GetPath() string { + if x != nil { + return x.Path } return "" } -func init() { - proto.RegisterType((*Http)(nil), "google.api.Http") - proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") - proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") -} - -func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_ff9994be407cdcc9) } - -var fileDescriptor_ff9994be407cdcc9 = []byte{ - // 419 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30, - 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52, - 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37, - 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d, - 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b, - 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e, - 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e, - 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea, - 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc, - 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55, - 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1, - 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6, - 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52, - 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef, - 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55, - 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42, - 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22, - 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a, - 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65, - 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b, - 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63, - 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec, - 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea, - 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18, - 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd, - 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac, - 0x02, 0x00, 0x00, +var File_google_api_http_proto protoreflect.FileDescriptor + +var file_google_api_http_proto_rawDesc = 
[]byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, + 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, + 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_http_proto_rawDescOnce sync.Once + file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc +) + +func file_google_api_http_proto_rawDescGZIP() []byte { + file_google_api_http_proto_rawDescOnce.Do(func() { + file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) + }) + return file_google_api_http_proto_rawDescData +} + +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_api_http_proto_goTypes = []interface{}{ + (*Http)(nil), // 0: google.api.Http + (*HttpRule)(nil), // 1: google.api.HttpRule + (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern +} +var file_google_api_http_proto_depIdxs = []int32{ + 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule + 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern + 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_api_http_proto_init() } +func file_google_api_http_proto_init() { + if File_google_api_http_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomHttpPattern); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_http_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_http_proto_goTypes, + DependencyIndexes: file_google_api_http_proto_depIdxs, + MessageInfos: file_google_api_http_proto_msgTypes, + }.Build() + File_google_api_http_proto = out.File + 
file_google_api_http_proto_rawDesc = nil + file_google_api_http_proto_goTypes = nil + file_google_api_http_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index af057b90..ff6147fe 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,26 +1,45 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/resource.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // A description of the historical or future-looking state of the // resource pattern. @@ -38,24 +57,45 @@ const ( ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 ) -var ResourceDescriptor_History_name = map[int32]string{ - 0: "HISTORY_UNSPECIFIED", - 1: "ORIGINALLY_SINGLE_PATTERN", - 2: "FUTURE_MULTI_PATTERN", -} +// Enum value maps for ResourceDescriptor_History. 
+var ( + ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", + } + ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, + } +) -var ResourceDescriptor_History_value = map[string]int32{ - "HISTORY_UNSPECIFIED": 0, - "ORIGINALLY_SINGLE_PATTERN": 1, - "FUTURE_MULTI_PATTERN": 2, +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { + p := new(ResourceDescriptor_History) + *p = x + return p } func (x ResourceDescriptor_History) String() string { - return proto.EnumName(ResourceDescriptor_History_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceDescriptor_History) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[0] +} + +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_History.Descriptor instead. func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_465e9122405d1bb5, []int{0, 0} + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} } // A simple descriptor of a resource type. @@ -66,31 +106,111 @@ func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { // // Example: // -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// name_descriptor: { +// pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// name_descriptor: +// - pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" // // Sometimes, resources have multiple patterns, typically because they can // live under multiple parents. 
// // Example: // -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// name_descriptor: { +// pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// name_descriptor: { +// pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// } +// name_descriptor: { +// pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// } +// name_descriptor: { +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// name_descriptor: +// - pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// - pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// - pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// - pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// +// For flexible resources, the resource name doesn't contain parent names, but +// the resource itself has parents for policy evaluation. +// +// Example: +// +// message Shelf { +// option (google.api.resource) = { +// type: "library.googleapis.com/Shelf" +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// } +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'library.googleapis.com/Shelf' +// name_descriptor: +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The resource type. It must be in the format of // {service_name}/{resource_type_kind}. The `resource_type_kind` must be // singular and must not include version numbers. @@ -102,11 +222,20 @@ type ResourceDescriptor struct { // should use PascalCase (UpperCamelCase). The maximum number of // characters allowed for the `resource_type_kind` is 100. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Optional. The valid resource name pattern(s) for this resource type. 
+ // Optional. The relative resource name pattern associated with this resource + // type. The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; // // Examples: - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" // // The components in braces correspond to the IDs for each resource in the // hierarchy. It is expected that, if multiple patterns are provided, @@ -119,199 +248,397 @@ type ResourceDescriptor struct { // Optional. The historical or future-looking state of the resource pattern. // // Example: - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same + // concept of the `plural` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. See + // https://aip.dev/156 + Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. 
+ Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` } -func (m *ResourceDescriptor) Reset() { *m = ResourceDescriptor{} } -func (m *ResourceDescriptor) String() string { return proto.CompactTextString(m) } -func (*ResourceDescriptor) ProtoMessage() {} -func (*ResourceDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_465e9122405d1bb5, []int{0} +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResourceDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceDescriptor.Unmarshal(m, b) -} -func (m *ResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceDescriptor.Marshal(b, m, deterministic) -} -func (m *ResourceDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceDescriptor.Merge(m, src) +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResourceDescriptor) XXX_Size() int { - return xxx_messageInfo_ResourceDescriptor.Size(m) -} -func (m *ResourceDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceDescriptor.DiscardUnknown(m) + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ResourceDescriptor proto.InternalMessageInfo +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0} +} -func (m *ResourceDescriptor) GetType() string { - if m != nil { - return m.Type +func (x *ResourceDescriptor) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *ResourceDescriptor) GetPattern() []string { - if m != nil { - return m.Pattern +func (x *ResourceDescriptor) GetPattern() []string { + if x != nil { + return x.Pattern } return nil } -func (m *ResourceDescriptor) GetNameField() string { - if m != nil { - return m.NameField +func (x *ResourceDescriptor) GetNameField() string { + if x != nil { + return x.NameField } return "" } -func (m *ResourceDescriptor) GetHistory() ResourceDescriptor_History { - if m != nil { - return m.History +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if x != nil { + return x.History } return ResourceDescriptor_HISTORY_UNSPECIFIED } -// Defines a proto annotation that describes a field that refers to a resource. +func (x *ResourceDescriptor) GetPlural() string { + if x != nil { + return x.Plural + } + return "" +} + +func (x *ResourceDescriptor) GetSingular() string { + if x != nil { + return x.Singular + } + return "" +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. type ResourceReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The resource type that the annotated field references. 
// // Example: // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type = "pubsub.googleapis.com/Topic" - // }]; - // } + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The resource type of a child collection that the annotated field - // references. This is useful for `parent` fields where a resource has more - // than one possible type of parent. + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. // // Example: // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` } -func (m *ResourceReference) Reset() { *m = ResourceReference{} } -func (m *ResourceReference) String() string { return proto.CompactTextString(m) } -func (*ResourceReference) ProtoMessage() {} -func (*ResourceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_465e9122405d1bb5, []int{1} +func (x *ResourceReference) Reset() { + *x = ResourceReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResourceReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceReference.Unmarshal(m, b) +func (x *ResourceReference) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic) -} -func (m *ResourceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceReference.Merge(m, src) -} -func (m *ResourceReference) XXX_Size() int { - return xxx_messageInfo_ResourceReference.Size(m) -} -func (m *ResourceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceReference.DiscardUnknown(m) + +func (*ResourceReference) ProtoMessage() {} + +func (x *ResourceReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ResourceReference proto.InternalMessageInfo +// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead. 
+func (*ResourceReference) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{1} +} -func (m *ResourceReference) GetType() string { - if m != nil { - return m.Type +func (x *ResourceReference) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *ResourceReference) GetChildType() string { - if m != nil { - return m.ChildType +func (x *ResourceReference) GetChildType() string { + if x != nil { + return x.ChildType } return "" } -var E_ResourceReference = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*ResourceReference)(nil), - Field: 1055, - Name: "google.api.resource_reference", - Tag: "bytes,1055,opt,name=resource_reference", - Filename: "google/api/resource.proto", +var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*ResourceReference)(nil), + Field: 1055, + Name: "google.api.resource_reference", + Tag: "bytes,1055,opt,name=resource_reference", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: ([]*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource_definition", + Tag: "bytes,1053,rep,name=resource_definition", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource", + Tag: "bytes,1053,opt,name=resource", + Filename: "google/api/resource.proto", + }, +} + +// Extension fields to descriptor.FieldOptions. +var ( + // An annotation that describes a resource reference, see + // [ResourceReference][]. + // + // optional google.api.ResourceReference resource_reference = 1055; + E_ResourceReference = &file_google_api_resource_proto_extTypes[0] +) + +// Extension fields to descriptor.FileOptions. +var ( + // An annotation that describes a resource definition without a corresponding + // message; see [ResourceDescriptor][]. + // + // repeated google.api.ResourceDescriptor resource_definition = 1053; + E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] +) + +// Extension fields to descriptor.MessageOptions. +var ( + // An annotation that describes a resource definition, see + // [ResourceDescriptor][]. 
+ // + // optional google.api.ResourceDescriptor resource = 1053; + E_Resource = &file_google_api_resource_proto_extTypes[2] +) + +var File_google_api_resource_proto protoreflect.FileDescriptor + +var file_google_api_resource_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, + 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x22, 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, + 0x0a, 0x13, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, + 0x4e, 0x41, 0x4c, 0x4c, 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, + 0x54, 0x45, 0x52, 0x4e, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, + 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, + 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 
0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var E_Resource = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource", - Tag: "bytes,1053,opt,name=resource", - Filename: "google/api/resource.proto", +var ( + file_google_api_resource_proto_rawDescOnce sync.Once + file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc +) + +func file_google_api_resource_proto_rawDescGZIP() []byte { + file_google_api_resource_proto_rawDescOnce.Do(func() { + file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) + }) + return file_google_api_resource_proto_rawDescData } -func init() { - proto.RegisterEnum("google.api.ResourceDescriptor_History", ResourceDescriptor_History_name, ResourceDescriptor_History_value) - proto.RegisterType((*ResourceDescriptor)(nil), "google.api.ResourceDescriptor") - proto.RegisterType((*ResourceReference)(nil), "google.api.ResourceReference") - proto.RegisterExtension(E_ResourceReference) - proto.RegisterExtension(E_Resource) +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_resource_proto_goTypes = []interface{}{ + (ResourceDescriptor_History)(0), // 0: 
google.api.ResourceDescriptor.History + (*ResourceDescriptor)(nil), // 1: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 2: google.api.ResourceReference + (*descriptor.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*descriptor.FileOptions)(nil), // 4: google.protobuf.FileOptions + (*descriptor.MessageOptions)(nil), // 5: google.protobuf.MessageOptions +} +var file_google_api_resource_proto_depIdxs = []int32{ + 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History + 3, // 1: google.api.resource_reference:extendee -> google.protobuf.FieldOptions + 4, // 2: google.api.resource_definition:extendee -> google.protobuf.FileOptions + 5, // 3: google.api.resource:extendee -> google.protobuf.MessageOptions + 2, // 4: google.api.resource_reference:type_name -> google.api.ResourceReference + 1, // 5: google.api.resource_definition:type_name -> google.api.ResourceDescriptor + 1, // 6: google.api.resource:type_name -> google.api.ResourceDescriptor + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 4, // [4:7] is the sub-list for extension type_name + 1, // [1:4] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/api/resource.proto", fileDescriptor_465e9122405d1bb5) } - -var fileDescriptor_465e9122405d1bb5 = []byte{ - // 430 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x41, 0x6f, 0xd3, 0x30, - 0x18, 0x25, 0x59, 0x45, 0xd7, 0x0f, 0x31, 0x6d, 0x06, 0x89, 0x0c, 0x29, 0x10, 0xf5, 0x80, 0x7a, - 0x4a, 0xa4, 0x71, 0x1b, 0x17, 0x3a, 0x96, 0x76, 0x91, 0xba, 0x36, 0x72, 0xd3, 0xc3, 0x00, 0x29, - 0xf2, 0xd2, 0xaf, 0x59, 0xa4, 0xcc, 0xb6, 0x9c, 0xec, 0xd0, 0x1b, 0x7f, 0x04, 0x21, 0xf1, 0x2b, - 0x39, 0xa2, 0x3a, 0x71, 0x98, 0xd8, 0xb4, 0x9b, 0xf3, 0xde, 0xfb, 0xbe, 0xf7, 0xfc, 0x1c, 0x38, - 0xce, 0x85, 0xc8, 0x4b, 0x0c, 0x98, 0x2c, 0x02, 0x85, 0x95, 0xb8, 0x53, 0x19, 0xfa, 0x52, 0x89, - 0x5a, 0x10, 0x68, 0x28, 0x9f, 0xc9, 0xe2, 0xad, 0xd7, 0xca, 0x34, 0x73, 0x7d, 0xb7, 0x09, 0xd6, - 0x58, 0x65, 0xaa, 0x90, 0xb5, 0x50, 0x8d, 0x7a, 0xf8, 0xc3, 0x06, 0x42, 0xdb, 0x05, 0xe7, 0x1d, - 0x49, 0x08, 0xf4, 0xea, 0xad, 0x44, 0xc7, 0xf2, 0xac, 0xd1, 0x80, 0xea, 0x33, 0x71, 0xa0, 0x2f, - 0x59, 0x5d, 0xa3, 0xe2, 0x8e, 0xed, 0xed, 0x8d, 0x06, 0xd4, 0x7c, 0x12, 0x17, 0x80, 0xb3, 0x5b, - 0x4c, 0x37, 0x05, 0x96, 0x6b, 0x67, 0x4f, 0xcf, 0x0c, 0x76, 0xc8, 0x64, 0x07, 0x90, 0xcf, 0xd0, - 0xbf, 0x29, 0xaa, 0x5a, 0xa8, 0xad, 0xd3, 0xf3, 0xac, 0xd1, 0xc1, 0xc9, 0x07, 0xff, 0x5f, 0x46, - 0xff, 0xa1, 0xbb, 0x7f, 0xd1, 0xa8, 0xa9, 0x19, 0x1b, 0x7e, 0x83, 0x7e, 0x8b, 0x91, 0x37, 0xf0, - 0xea, 0x22, 0x5a, 0x26, 0x0b, 0x7a, 0x95, 0xae, 0xe6, 0xcb, 0x38, 0xfc, 0x12, 0x4d, 0xa2, 0xf0, - 0xfc, 0xf0, 0x19, 0x71, 0xe1, 0x78, 0x41, 0xa3, 0x69, 0x34, 0x1f, 0xcf, 0x66, 0x57, 0xe9, 0x32, - 0x9a, 0x4f, 0x67, 0x61, 0x1a, 0x8f, 0x93, 0x24, 0xa4, 0xf3, 0x43, 0x8b, 0x38, 0xf0, 0x7a, 0xb2, - 0x4a, 0x56, 0x34, 0x4c, 0x2f, 0x57, 0xb3, 0x24, 0xea, 0x18, 0x7b, 0x38, 0x81, 0x23, 0x93, 0x81, - 0xe2, 0x06, 0x15, 0xf2, 0x0c, 0x1f, 0x2d, 0xc0, 0x05, 0xc8, 0x6e, 0x8a, 0x72, 0x9d, 0x6a, 0xc6, - 0x6e, 0xae, 0xa9, 0x91, 0x64, 0x2b, 0xf1, 0xb4, 0x04, 0x62, 0x9e, 0x22, 0x55, 0xdd, 0x22, 0xd7, - 0xdc, 0xd5, 0xbc, 0x81, 0xaf, 0x4b, 0x59, 0xc8, 0xba, 0x10, 0xbc, 0x72, 0x7e, 0xed, 0x7b, 0xd6, - 0xe8, 0xc5, 0x89, 0xfb, 0x58, 0x23, 0x5d, 0x1a, 0x7a, 0xa4, 0xfe, 0x87, 0x4e, 0xbf, 0xc3, 0xbe, - 0x01, 0xc9, 0xfb, 
0x07, 0x1e, 0x97, 0x58, 0x55, 0x2c, 0x47, 0xe3, 0xf2, 0xb3, 0x71, 0x79, 0xf7, - 0x74, 0xef, 0xb4, 0xdb, 0x78, 0xc6, 0xe1, 0x20, 0x13, 0xb7, 0xf7, 0xe4, 0x67, 0x2f, 0x8d, 0x3e, - 0xde, 0x79, 0xc4, 0xd6, 0xd7, 0x71, 0x4b, 0xe6, 0xa2, 0x64, 0x3c, 0xf7, 0x85, 0xca, 0x83, 0x1c, - 0xb9, 0x4e, 0x10, 0x34, 0x14, 0x93, 0x45, 0xa5, 0xff, 0x50, 0xc6, 0xb9, 0xa8, 0x99, 0x8e, 0xf2, - 0xe9, 0xde, 0xf9, 0x8f, 0x65, 0xfd, 0xb6, 0x7b, 0xd3, 0x71, 0x1c, 0x5d, 0x3f, 0xd7, 0x73, 0x1f, - 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x1e, 0x07, 0x80, 0xd8, 0x02, 0x00, 0x00, +func init() { file_google_api_resource_proto_init() } +func file_google_api_resource_proto_init() { + if File_google_api_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_resource_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_resource_proto_goTypes, + DependencyIndexes: file_google_api_resource_proto_depIdxs, + EnumInfos: file_google_api_resource_proto_enumTypes, + MessageInfos: file_google_api_resource_proto_msgTypes, + ExtensionInfos: file_google_api_resource_proto_extTypes, + }.Build() + File_google_api_resource_proto = out.File + file_google_api_resource_proto_rawDesc = nil + file_google_api_resource_proto_goTypes = nil + file_google_api_resource_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go index 0499510d..375d3876 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -1,31 +1,57 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/iam/v1/iam_policy.proto package iam import ( context "context" - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // REQUIRED: The resource for which the policy is being specified. // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` @@ -33,105 +59,121 @@ type SetIamPolicyRequest struct { // the policy is limited to a few 10s of KB. An empty policy is a // valid policy but certain Cloud Platform services (such as Projects) // might reject them. 
- Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` } -func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} } -func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) } -func (*SetIamPolicyRequest) ProtoMessage() {} -func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{0} +func (x *SetIamPolicyRequest) Reset() { + *x = SetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SetIamPolicyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetIamPolicyRequest.Unmarshal(m, b) -} -func (m *SetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetIamPolicyRequest.Marshal(b, m, deterministic) +func (x *SetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetIamPolicyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetIamPolicyRequest.Merge(m, src) -} -func (m *SetIamPolicyRequest) XXX_Size() int { - return xxx_messageInfo_SetIamPolicyRequest.Size(m) -} -func (m *SetIamPolicyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetIamPolicyRequest.DiscardUnknown(m) + +func (*SetIamPolicyRequest) ProtoMessage() {} + +func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetIamPolicyRequest proto.InternalMessageInfo +// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} +} -func (m *SetIamPolicyRequest) GetResource() string { - if m != nil { - return m.Resource +func (x *SetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *SetIamPolicyRequest) GetPolicy() *Policy { - if m != nil { - return m.Policy +func (x *SetIamPolicyRequest) GetPolicy() *Policy { + if x != nil { + return x.Policy } return nil } // Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // REQUIRED: The resource for which the policy is being requested. // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // OPTIONAL: A `GetPolicyOptions` object for specifying options to // `GetIamPolicy`. This field is only used by Cloud IAM. 
- Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} } -func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) } -func (*GetIamPolicyRequest) ProtoMessage() {} -func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{1} +func (x *GetIamPolicyRequest) Reset() { + *x = GetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetIamPolicyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIamPolicyRequest.Unmarshal(m, b) -} -func (m *GetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIamPolicyRequest.Marshal(b, m, deterministic) -} -func (m *GetIamPolicyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIamPolicyRequest.Merge(m, src) +func (x *GetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetIamPolicyRequest) XXX_Size() int { - return xxx_messageInfo_GetIamPolicyRequest.Size(m) -} -func (m *GetIamPolicyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetIamPolicyRequest.DiscardUnknown(m) + +func (*GetIamPolicyRequest) ProtoMessage() {} + +func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetIamPolicyRequest proto.InternalMessageInfo +// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} +} -func (m *GetIamPolicyRequest) GetResource() string { - if m != nil { - return m.Resource +func (x *GetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { - if m != nil { - return m.Options +func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { + if x != nil { + return x.Options } return nil } // Request message for `TestIamPermissions` method. type TestIamPermissionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // REQUIRED: The resource for which the policy detail is being requested. // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` @@ -139,143 +181,307 @@ type TestIamPermissionsRequest struct { // wildcards (such as '*' or 'storage.*') are not allowed. For more // information see // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). 
- Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` } -func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} } -func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) } -func (*TestIamPermissionsRequest) ProtoMessage() {} -func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{2} +func (x *TestIamPermissionsRequest) Reset() { + *x = TestIamPermissionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TestIamPermissionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestIamPermissionsRequest.Unmarshal(m, b) -} -func (m *TestIamPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestIamPermissionsRequest.Marshal(b, m, deterministic) -} -func (m *TestIamPermissionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestIamPermissionsRequest.Merge(m, src) -} -func (m *TestIamPermissionsRequest) XXX_Size() int { - return xxx_messageInfo_TestIamPermissionsRequest.Size(m) +func (x *TestIamPermissionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TestIamPermissionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TestIamPermissionsRequest.DiscardUnknown(m) + +func (*TestIamPermissionsRequest) ProtoMessage() {} + +func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TestIamPermissionsRequest proto.InternalMessageInfo +// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead. +func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} +} -func (m *TestIamPermissionsRequest) GetResource() string { - if m != nil { - return m.Resource +func (x *TestIamPermissionsRequest) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *TestIamPermissionsRequest) GetPermissions() []string { - if m != nil { - return m.Permissions +func (x *TestIamPermissionsRequest) GetPermissions() []string { + if x != nil { + return x.Permissions } return nil } // Response message for `TestIamPermissions` method. type TestIamPermissionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A subset of `TestPermissionsRequest.permissions` that the caller is // allowed. 
- Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` } -func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} } -func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) } -func (*TestIamPermissionsResponse) ProtoMessage() {} -func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{3} +func (x *TestIamPermissionsResponse) Reset() { + *x = TestIamPermissionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TestIamPermissionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestIamPermissionsResponse.Unmarshal(m, b) +func (x *TestIamPermissionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TestIamPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestIamPermissionsResponse.Marshal(b, m, deterministic) -} -func (m *TestIamPermissionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestIamPermissionsResponse.Merge(m, src) -} -func (m *TestIamPermissionsResponse) XXX_Size() int { - return xxx_messageInfo_TestIamPermissionsResponse.Size(m) -} -func (m *TestIamPermissionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TestIamPermissionsResponse.DiscardUnknown(m) + +func (*TestIamPermissionsResponse) ProtoMessage() {} + +func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TestIamPermissionsResponse proto.InternalMessageInfo +// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead. 
+func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} +} -func (m *TestIamPermissionsResponse) GetPermissions() []string { - if m != nil { - return m.Permissions +func (x *TestIamPermissionsResponse) GetPermissions() []string { + if x != nil { + return x.Permissions } return nil } -func init() { - proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest") - proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest") - proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest") - proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse") -} - -func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor_d2728eb97d748a32) } - -var fileDescriptor_d2728eb97d748a32 = []byte{ - // 465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x8a, 0x13, 0x31, - 0x1c, 0x27, 0x5d, 0x58, 0x6d, 0x56, 0x05, 0xa7, 0x88, 0x35, 0x2b, 0xb5, 0x44, 0x0f, 0x6d, 0xa1, - 0x19, 0xbb, 0x9e, 0xac, 0x28, 0xec, 0x7a, 0x18, 0xe6, 0x20, 0x96, 0x51, 0x16, 0x94, 0x82, 0xc6, - 0x31, 0x0c, 0x81, 0xc9, 0x24, 0x4e, 0xd2, 0x05, 0x11, 0x2f, 0x1e, 0x7c, 0x01, 0x6f, 0x3e, 0x82, - 0x67, 0x9f, 0x62, 0xaf, 0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x66, 0x92, 0xee, 0xce, 0x47, 0x95, 0x0a, - 0x9e, 0x4a, 0xf3, 0xfb, 0xfa, 0x7f, 0xcc, 0x1f, 0x0e, 0x12, 0x29, 0x93, 0x94, 0xf9, 0x9c, 0x0a, - 0xff, 0x64, 0x56, 0xfc, 0xbc, 0x52, 0x32, 0xe5, 0xf1, 0x7b, 0xa2, 0x72, 0x69, 0xa4, 0x77, 0xd9, - 0xe2, 0x84, 0x53, 0x41, 0x4e, 0x66, 0x68, 0xbf, 0x4e, 0x97, 0xca, 0x70, 0x99, 0x69, 0xcb, 0x45, - 0xa8, 0x0e, 0x56, 0x7d, 0xd0, 0x4d, 0x87, 0x51, 0xc5, 0x7d, 0x9a, 0x65, 0xd2, 0xd0, 0xaa, 0xf2, - 0x7a, 0x05, 0x8d, 0x53, 0xce, 0x32, 0x63, 0x01, 0xfc, 0x1a, 0xf6, 0x9e, 0x31, 0x13, 0x52, 0xb1, - 0x28, 0xcd, 0x22, 0xf6, 0x6e, 0xc5, 0xb4, 0xf1, 0x10, 0xbc, 0x98, 0x33, 0x2d, 0x57, 0x79, 0xcc, - 0xfa, 0x60, 0x08, 0x46, 0xdd, 0xe8, 0xec, 0xbf, 0x37, 0x85, 0xbb, 0x36, 0xb9, 0xdf, 0x19, 0x82, - 0xd1, 0xde, 0xc1, 0x35, 0x52, 0x6b, 0x81, 0x38, 0x27, 0x47, 0xc2, 0x29, 0xec, 0x05, 0xff, 0x98, - 0x70, 0x1f, 0x5e, 0x70, 0x8d, 0xbb, 0x88, 0x5b, 0x8d, 0x88, 0x80, 0x19, 0xeb, 0xf6, 0xd4, 0xd2, - 0xa2, 0x35, 0x1f, 0xbf, 0x80, 0x37, 0x9e, 0x33, 0x5d, 0xc6, 0xb1, 0x5c, 0x70, 0xad, 0x4b, 0x78, - 0x8b, 0xcc, 0x21, 0xdc, 0x53, 0xe7, 0x8a, 0x7e, 0x67, 0xb8, 0x33, 0xea, 0x46, 0xd5, 0x27, 0xfc, - 0x08, 0xa2, 0x4d, 0xd6, 0x5a, 0xc9, 0x4c, 0xb7, 0xf4, 0xa0, 0xa5, 0x3f, 0xf8, 0xbe, 0x03, 0xbb, - 0xe1, 0xe1, 0x13, 0x5b, 0xb8, 0x67, 0xe0, 0xa5, 0xea, 0xe0, 0x3d, 0xdc, 0x68, 0x71, 0xc3, 0x56, - 0xd0, 0xe6, 0x49, 0xe3, 0xf1, 0xa7, 0x1f, 0x3f, 0xbf, 0x74, 0x6e, 0xe3, 0x41, 0xf1, 0x51, 0x7c, - 0x58, 0x77, 0xf4, 0x70, 0x32, 0xf9, 0x38, 0xd7, 0x15, 0x97, 0x39, 0x98, 0x14, 0xa9, 0xc1, 0xdf, - 0x52, 0x83, 0xff, 0x92, 0x9a, 0x34, 0x52, 0xbf, 0x02, 0xe8, 0xb5, 0x47, 0xe7, 0x8d, 0x1a, 0xc6, - 0x7f, 0x5c, 0x1c, 0x1a, 0x6f, 0xc1, 0xb4, 0x7b, 0xc0, 0x7e, 0x59, 0xd6, 0x18, 0xdf, 0x69, 0x97, - 0x65, 0x5a, 0xaa, 0x39, 0x98, 0xa0, 0xc1, 0xe9, 0xe1, 0x3e, 0xa7, 0x62, 0x2a, 0x98, 0xa1, 0x53, - 0xaa, 0xb8, 0x8b, 0xa2, 0x8a, 0x6b, 0x12, 0x4b, 0x71, 0xf4, 0x19, 0xc0, 0xab, 0xb1, 0x14, 0xf5, - 0x0a, 0x8e, 0xae, 0x9c, 0x35, 0xb8, 0x28, 0xee, 0x68, 0x01, 0x5e, 0xde, 0x75, 0x84, 0x44, 0xa6, - 0x34, 0x4b, 0x88, 0xcc, 0x13, 0x3f, 0x61, 0x59, 0x79, 0x65, 0xfe, 0xb9, 0xa5, 0xbb, 0xdd, 0x07, - 0x9c, 0x8a, 0x5f, 0x00, 0x7c, 0xeb, 0xf4, 
0x02, 0xab, 0x7a, 0x9c, 0xca, 0xd5, 0x5b, 0x12, 0x52, - 0x41, 0x8e, 0x67, 0xa7, 0xeb, 0xd7, 0x65, 0xf9, 0xba, 0x0c, 0xa9, 0x58, 0x1e, 0xcf, 0xde, 0xec, - 0x96, 0x5e, 0xf7, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x57, 0xb0, 0xe9, 0x52, 0x04, 0x00, - 0x00, +var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, 0x13, 0x53, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x77, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, + 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, + 0x52, 0x08, 0x72, 0x65, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, + 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, + 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, + 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc +) + +func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData) + }) + return file_google_iam_v1_iam_policy_proto_rawDescData +} + +var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ + (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest + (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest + (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest + (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse + (*Policy)(nil), // 4: google.iam.v1.Policy + (*GetPolicyOptions)(nil), // 5: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy + 5, // 1: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions + 0, // 2: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 1, // 3: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 2, // 4: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 4, // 5: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy + 4, // 6: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy + 3, // 7: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_iam_policy_proto_init() } +func file_google_iam_v1_iam_policy_proto_init() { + if File_google_iam_v1_iam_policy_proto != nil { + return + } + file_google_iam_v1_options_proto_init() + file_google_iam_v1_policy_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetIamPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetIamPolicyRequest); i 
{ + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestIamPermissionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestIamPermissionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_iam_v1_iam_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs, + MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_iam_policy_proto = out.File + file_google_iam_v1_iam_policy_proto_rawDesc = nil + file_google_iam_v1_iam_policy_proto_goTypes = nil + file_google_iam_v1_iam_policy_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // IAMPolicyClient is the client API for IAMPolicy service. // @@ -299,10 +505,10 @@ type IAMPolicyClient interface { } type iAMPolicyClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient { +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { return &iAMPolicyClient{cc} } @@ -352,6 +558,20 @@ type IAMPolicyServer interface { TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) } +// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations. +type UnimplementedIAMPolicyServer struct { +} + +func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} + func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { s.RegisterService(&_IAMPolicy_serviceDesc, srv) } diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go index 8f02bd3c..e6c2bfcb 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go @@ -1,92 +1,186 @@ +// Copyright 2019 Google LLC. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/iam/v1/options.proto package iam import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Optional. The policy format version to be returned. - // Acceptable values are 0 and 1. - // If the value is 0, or the field is omitted, policy format version 1 will be - // returned. - RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value will be + // rejected. + // + // Requests for policies with any conditional bindings must specify version 3. + // Policies without any conditional bindings may specify any valid value or + // leave the field unset. 
+ RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` } -func (m *GetPolicyOptions) Reset() { *m = GetPolicyOptions{} } -func (m *GetPolicyOptions) String() string { return proto.CompactTextString(m) } -func (*GetPolicyOptions) ProtoMessage() {} -func (*GetPolicyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_19aa09e909092bd1, []int{0} +func (x *GetPolicyOptions) Reset() { + *x = GetPolicyOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetPolicyOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPolicyOptions.Unmarshal(m, b) +func (x *GetPolicyOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetPolicyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPolicyOptions.Marshal(b, m, deterministic) + +func (*GetPolicyOptions) ProtoMessage() {} + +func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetPolicyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPolicyOptions.Merge(m, src) + +// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead. +func (*GetPolicyOptions) Descriptor() ([]byte, []int) { + return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0} } -func (m *GetPolicyOptions) XXX_Size() int { - return xxx_messageInfo_GetPolicyOptions.Size(m) + +func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 { + if x != nil { + return x.RequestedPolicyVersion + } + return 0 } -func (m *GetPolicyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_GetPolicyOptions.DiscardUnknown(m) + +var File_google_iam_v1_options_proto protoreflect.FileDescriptor + +var file_google_iam_v1_options_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, + 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, + 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var xxx_messageInfo_GetPolicyOptions proto.InternalMessageInfo +var ( + file_google_iam_v1_options_proto_rawDescOnce sync.Once + file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc +) -func (m *GetPolicyOptions) GetRequestedPolicyVersion() int32 { - if m != nil { - return m.RequestedPolicyVersion - } - return 0 +func file_google_iam_v1_options_proto_rawDescGZIP() []byte { + file_google_iam_v1_options_proto_rawDescOnce.Do(func() { + file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData) + }) + return file_google_iam_v1_options_proto_rawDescData } -func init() { - proto.RegisterType((*GetPolicyOptions)(nil), "google.iam.v1.GetPolicyOptions") +var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_iam_v1_options_proto_goTypes = []interface{}{ + (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_options_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/iam/v1/options.proto", fileDescriptor_19aa09e909092bd1) } - -var fileDescriptor_19aa09e909092bd1 = []byte{ - // 229 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0xd4, 0xcf, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, - 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x85, 0x48, 0xea, 0x65, 0x26, 0xe6, 0xea, - 0x95, 0x19, 0x4a, 0xc9, 0x40, 0xd5, 0x26, 0x16, 0x64, 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, - 0x22, 0x29, 0x56, 0xf2, 0xe1, 0x12, 0x70, 0x4f, 0x2d, 0x09, 0xc8, 0xcf, 0xc9, 0x4c, 0xae, 0xf4, - 0x87, 0x18, 0x23, 0x64, 0xc1, 0x25, 0x51, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x92, 0x9a, 0x12, - 0x5f, 0x00, 0x96, 0x8a, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x93, 0x60, 0x54, 0x60, 0xd4, - 0x60, 0x0d, 0x12, 0x83, 0xcb, 0x43, 0x74, 0x86, 0x41, 0x64, 0x9d, 0x5a, 0x18, 0xb9, 0x04, 0x93, - 0xf3, 0x73, 0xf5, 0x50, 0x5c, 0xe0, 0xc4, 0x03, 0x35, 0x38, 0x00, 0x64, 0x63, 0x00, 0x63, 0x94, - 0x01, 0x54, 0x3a, 0x3d, 0x3f, 0x27, 0x31, 0x2f, 0x5d, 0x2f, 0xbf, 0x28, 0x5d, 0x3f, 0x3d, 0x35, - 0x0f, 0xec, 0x1e, 0x7d, 0x88, 0x54, 0x62, 0x41, 0x66, 0x31, 0xd4, 0x73, 0xd6, 0x99, 0x89, 0xb9, - 0x3f, 0x18, 0x19, 0x57, 0x31, 0x09, 0xbb, 0x43, 0x74, 0x39, 0xe7, 0xe4, 0x97, 0xa6, 0xe8, 0x79, - 0x26, 0xe6, 0xea, 0x85, 0x19, 0x9e, 0x82, 0x89, 0xc6, 0x80, 0x45, 0x63, 0x3c, 0x13, 0x73, 0x63, - 0xc2, 0x0c, 0x93, 0xd8, 0xc0, 0x66, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x96, 0x0c, - 0x8b, 0x27, 0x01, 0x00, 0x00, +func init() { file_google_iam_v1_options_proto_init() } +func file_google_iam_v1_options_proto_init() { + if File_google_iam_v1_options_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPolicyOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_options_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_options_proto_goTypes, + DependencyIndexes: file_google_iam_v1_options_proto_depIdxs, + MessageInfos: file_google_iam_v1_options_proto_msgTypes, + }.Build() + File_google_iam_v1_options_proto = out.File + file_google_iam_v1_options_proto_rawDesc = nil + file_google_iam_v1_options_proto_goTypes = nil + file_google_iam_v1_options_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go index 275cfcea..ba64dcc4 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -1,27 +1,47 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/iam/v1/policy.proto package iam import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" expr "google.golang.org/genproto/googleapis/type/expr" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // The type of action performed on a Binding in a policy. 
type BindingDelta_Action int32 @@ -35,24 +55,45 @@ const ( BindingDelta_REMOVE BindingDelta_Action = 2 ) -var BindingDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", -} +// Enum value maps for BindingDelta_Action. +var ( + BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) -var BindingDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, +func (x BindingDelta_Action) Enum() *BindingDelta_Action { + p := new(BindingDelta_Action) + *p = x + return p } func (x BindingDelta_Action) String() string { - return proto.EnumName(BindingDelta_Action_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() +} + +func (BindingDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[0] +} + +func (x BindingDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BindingDelta_Action.Descriptor instead. func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{3, 0} + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} } // The type of action performed on an audit configuration in a policy. @@ -67,51 +108,81 @@ const ( AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 ) -var AuditConfigDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", -} +// Enum value maps for AuditConfigDelta_Action. +var ( + AuditConfigDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + AuditConfigDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) -var AuditConfigDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, +func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { + p := new(AuditConfigDelta_Action) + *p = x + return p } func (x AuditConfigDelta_Action) String() string { - return proto.EnumName(AuditConfigDelta_Action_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() } +func (AuditConfigDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[1] +} + +func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{4, 0} + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4, 0} } // Defines an Identity and Access Management (IAM) policy. It is used to // specify access control policies for Cloud Platform resources. // // -// A `Policy` consists of a list of `bindings`. A `binding` binds a list of -// `members` to a `role`, where the members can be user accounts, Google groups, -// Google domains, and service accounts. A `role` is a named list of permissions -// defined by IAM. 
+// A `Policy` is a collection of `bindings`. A `binding` binds one or more +// `members` to a single `role`. Members can be user accounts, service accounts, +// Google groups, and domains (such as G Suite). A `role` is a named list of +// permissions (defined by IAM or configured by users). A `binding` can +// optionally specify a `condition`, which is a logic expression that further +// constrains the role binding based on attributes about the request and/or +// target resource. // // **JSON Example** // // { // "bindings": [ // { -// "role": "roles/owner", +// "role": "roles/resourcemanager.organizationAdmin", // "members": [ // "user:mike@example.com", // "group:admins@example.com", // "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com" +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" // ] // }, // { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] +// "role": "roles/resourcemanager.organizationViewer", +// "members": ["user:eve@example.com"], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", +// } // } // ] // } @@ -123,19 +194,40 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // - user:mike@example.com // - group:admins@example.com // - domain:google.com -// - serviceAccount:my-other-app@appspot.gserviceaccount.com -// role: roles/owner +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin // - members: -// - user:sean@example.com -// role: roles/viewer -// +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') // // For a description of IAM and its features, see the // [IAM developer's guide](https://cloud.google.com/iam/docs). type Policy struct { - // Deprecated. + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the format of the policy. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value will be + // rejected. + // + // Operations affecting conditional bindings must specify version 3. This can + // be either setting a conditional policy, modifying a conditional binding, + // or removing a binding (conditional or unconditional) from the stored + // conditional policy. + // Operations on non-conditional policies may specify any valid value or + // leave the field unset. + // + // If no etag is provided in the call to `setIamPolicy`, version compliance + // checks against the stored policy is skipped. Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Associates a list of `members` to a `role`. + // Associates a list of `members` to a `role`. Optionally may specify a + // `condition` that determines when binding is in effect. // `bindings` with no members will result in an error. Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` // `etag` is used for optimistic concurrency control as a way to help @@ -147,61 +239,71 @@ type Policy struct { // ensure that their change will be applied to the same version of the policy. // // If no `etag` is provided in the call to `setIamPolicy`, then the existing - // policy is overwritten. 
- Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // policy is overwritten. Due to blind-set semantics of an etag-less policy, + // 'setIamPolicy' will not fail even if the incoming policy version does not + // meet the requirements for modifying the stored policy. + Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{0} +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Policy) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Policy.Unmarshal(m, b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return xxx_messageInfo_Policy.Size(m) -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Policy proto.InternalMessageInfo +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} +} -func (m *Policy) GetVersion() int32 { - if m != nil { - return m.Version +func (x *Policy) GetVersion() int32 { + if x != nil { + return x.Version } return 0 } -func (m *Policy) GetBindings() []*Binding { - if m != nil { - return m.Bindings +func (x *Policy) GetBindings() []*Binding { + if x != nil { + return x.Bindings } return nil } -func (m *Policy) GetEtag() []byte { - if m != nil { - return m.Etag +func (x *Policy) GetEtag() []byte { + if x != nil { + return x.Etag } return nil } // Associates `members` with a `role`. type Binding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Role that is assigned to `members`. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -234,104 +336,116 @@ type Binding struct { // NOTE: An unsatisfied condition will not allow user access via current // binding. Different bindings, including their conditions, are examined // independently. 
- Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` } -func (m *Binding) Reset() { *m = Binding{} } -func (m *Binding) String() string { return proto.CompactTextString(m) } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{1} +func (x *Binding) Reset() { + *x = Binding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Binding) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Binding.Unmarshal(m, b) -} -func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Binding.Marshal(b, m, deterministic) +func (x *Binding) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Binding) XXX_Merge(src proto.Message) { - xxx_messageInfo_Binding.Merge(m, src) -} -func (m *Binding) XXX_Size() int { - return xxx_messageInfo_Binding.Size(m) -} -func (m *Binding) XXX_DiscardUnknown() { - xxx_messageInfo_Binding.DiscardUnknown(m) + +func (*Binding) ProtoMessage() {} + +func (x *Binding) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Binding proto.InternalMessageInfo +// Deprecated: Use Binding.ProtoReflect.Descriptor instead. +func (*Binding) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} +} -func (m *Binding) GetRole() string { - if m != nil { - return m.Role +func (x *Binding) GetRole() string { + if x != nil { + return x.Role } return "" } -func (m *Binding) GetMembers() []string { - if m != nil { - return m.Members +func (x *Binding) GetMembers() []string { + if x != nil { + return x.Members } return nil } -func (m *Binding) GetCondition() *expr.Expr { - if m != nil { - return m.Condition +func (x *Binding) GetCondition() *expr.Expr { + if x != nil { + return x.Condition } return nil } // The difference delta between two policies. type PolicyDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The delta for Bindings between two policies. BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` // The delta for AuditConfigs between two policies. 
- AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` } -func (m *PolicyDelta) Reset() { *m = PolicyDelta{} } -func (m *PolicyDelta) String() string { return proto.CompactTextString(m) } -func (*PolicyDelta) ProtoMessage() {} -func (*PolicyDelta) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{2} +func (x *PolicyDelta) Reset() { + *x = PolicyDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PolicyDelta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PolicyDelta.Unmarshal(m, b) -} -func (m *PolicyDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PolicyDelta.Marshal(b, m, deterministic) -} -func (m *PolicyDelta) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyDelta.Merge(m, src) +func (x *PolicyDelta) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PolicyDelta) XXX_Size() int { - return xxx_messageInfo_PolicyDelta.Size(m) -} -func (m *PolicyDelta) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyDelta.DiscardUnknown(m) + +func (*PolicyDelta) ProtoMessage() {} + +func (x *PolicyDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_PolicyDelta proto.InternalMessageInfo +// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. +func (*PolicyDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} +} -func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta { - if m != nil { - return m.BindingDeltas +func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if x != nil { + return x.BindingDeltas } return nil } -func (m *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { - if m != nil { - return m.AuditConfigDeltas +func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { + if x != nil { + return x.AuditConfigDeltas } return nil } @@ -339,6 +453,10 @@ func (m *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { // One delta entry for Binding. Each individual change (only one member in each // entry) to a binding will be a separate entry. type BindingDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The action that was performed on a Binding. // Required Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` @@ -350,63 +468,66 @@ type BindingDelta struct { // Follows the same format of Binding.members. // Required Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` - // The condition that is associated with this binding. This field is logged - // only for Cloud Audit Logging. 
- Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // The condition that is associated with this binding. + Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` } -func (m *BindingDelta) Reset() { *m = BindingDelta{} } -func (m *BindingDelta) String() string { return proto.CompactTextString(m) } -func (*BindingDelta) ProtoMessage() {} -func (*BindingDelta) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{3} +func (x *BindingDelta) Reset() { + *x = BindingDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BindingDelta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindingDelta.Unmarshal(m, b) -} -func (m *BindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindingDelta.Marshal(b, m, deterministic) +func (x *BindingDelta) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BindingDelta) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindingDelta.Merge(m, src) -} -func (m *BindingDelta) XXX_Size() int { - return xxx_messageInfo_BindingDelta.Size(m) -} -func (m *BindingDelta) XXX_DiscardUnknown() { - xxx_messageInfo_BindingDelta.DiscardUnknown(m) + +func (*BindingDelta) ProtoMessage() {} + +func (x *BindingDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BindingDelta proto.InternalMessageInfo +// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. +func (*BindingDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} +} -func (m *BindingDelta) GetAction() BindingDelta_Action { - if m != nil { - return m.Action +func (x *BindingDelta) GetAction() BindingDelta_Action { + if x != nil { + return x.Action } return BindingDelta_ACTION_UNSPECIFIED } -func (m *BindingDelta) GetRole() string { - if m != nil { - return m.Role +func (x *BindingDelta) GetRole() string { + if x != nil { + return x.Role } return "" } -func (m *BindingDelta) GetMember() string { - if m != nil { - return m.Member +func (x *BindingDelta) GetMember() string { + if x != nil { + return x.Member } return "" } -func (m *BindingDelta) GetCondition() *expr.Expr { - if m != nil { - return m.Condition +func (x *BindingDelta) GetCondition() *expr.Expr { + if x != nil { + return x.Condition } return nil } @@ -414,6 +535,10 @@ func (m *BindingDelta) GetCondition() *expr.Expr { // One delta entry for AuditConfig. Each individual change (only one // exempted_member in each entry) to a AuditConfig will be a separate entry. type AuditConfigDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The action that was performed on an audit configuration in a policy. 
// Required Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` @@ -429,112 +554,265 @@ type AuditConfigDelta struct { // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always // enabled, and cannot be configured. // Required - LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` } -func (m *AuditConfigDelta) Reset() { *m = AuditConfigDelta{} } -func (m *AuditConfigDelta) String() string { return proto.CompactTextString(m) } -func (*AuditConfigDelta) ProtoMessage() {} -func (*AuditConfigDelta) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{4} +func (x *AuditConfigDelta) Reset() { + *x = AuditConfigDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *AuditConfigDelta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AuditConfigDelta.Unmarshal(m, b) -} -func (m *AuditConfigDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AuditConfigDelta.Marshal(b, m, deterministic) +func (x *AuditConfigDelta) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AuditConfigDelta) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditConfigDelta.Merge(m, src) -} -func (m *AuditConfigDelta) XXX_Size() int { - return xxx_messageInfo_AuditConfigDelta.Size(m) -} -func (m *AuditConfigDelta) XXX_DiscardUnknown() { - xxx_messageInfo_AuditConfigDelta.DiscardUnknown(m) + +func (*AuditConfigDelta) ProtoMessage() {} + +func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_AuditConfigDelta proto.InternalMessageInfo +// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. 
+func (*AuditConfigDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} +} -func (m *AuditConfigDelta) GetAction() AuditConfigDelta_Action { - if m != nil { - return m.Action +func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { + if x != nil { + return x.Action } return AuditConfigDelta_ACTION_UNSPECIFIED } -func (m *AuditConfigDelta) GetService() string { - if m != nil { - return m.Service +func (x *AuditConfigDelta) GetService() string { + if x != nil { + return x.Service } return "" } -func (m *AuditConfigDelta) GetExemptedMember() string { - if m != nil { - return m.ExemptedMember +func (x *AuditConfigDelta) GetExemptedMember() string { + if x != nil { + return x.ExemptedMember } return "" } -func (m *AuditConfigDelta) GetLogType() string { - if m != nil { - return m.LogType +func (x *AuditConfigDelta) GetLogType() string { + if x != nil { + return x.LogType } return "" } -func init() { - proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value) - proto.RegisterEnum("google.iam.v1.AuditConfigDelta_Action", AuditConfigDelta_Action_name, AuditConfigDelta_Action_value) - proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy") - proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding") - proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta") - proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta") - proto.RegisterType((*AuditConfigDelta)(nil), "google.iam.v1.AuditConfigDelta") -} - -func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor_a3cd40b8a66b2a99) } - -var fileDescriptor_a3cd40b8a66b2a99 = []byte{ - // 550 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0xae, 0xd2, 0x4e, - 0x14, 0xc7, 0x7f, 0x03, 0xfc, 0xca, 0xe5, 0x70, 0x2f, 0xc2, 0xdc, 0x84, 0x54, 0x34, 0x91, 0x74, - 0xa1, 0xac, 0x5a, 0xc1, 0xb8, 0xd1, 0xc4, 0x84, 0x7f, 0x1a, 0x16, 0xf7, 0x42, 0xc6, 0x2b, 0x0b, - 0x43, 0x42, 0x86, 0x76, 0xac, 0x63, 0xda, 0x4e, 0xd3, 0xf6, 0x12, 0x58, 0xfb, 0x26, 0x2e, 0x7d, - 0x14, 0x1f, 0xc2, 0xad, 0xaf, 0xe0, 0xd2, 0x74, 0xa6, 0x45, 0x68, 0x8c, 0x1a, 0x77, 0x73, 0xce, - 0xf9, 0xce, 0x39, 0xe7, 0xf3, 0x6d, 0x07, 0x3a, 0xae, 0x10, 0xae, 0xc7, 0x2c, 0x4e, 0x7d, 0x6b, - 0xdb, 0xb7, 0x42, 0xe1, 0x71, 0x7b, 0x6f, 0x86, 0x91, 0x48, 0x04, 0xbe, 0x50, 0x35, 0x93, 0x53, - 0xdf, 0xdc, 0xf6, 0x3b, 0xed, 0x4c, 0x9a, 0xec, 0x43, 0x66, 0xb1, 0x5d, 0x18, 0x29, 0x59, 0xe7, - 0x7e, 0x96, 0xa7, 0x21, 0xb7, 0x68, 0x10, 0x88, 0x84, 0x26, 0x5c, 0x04, 0xb1, 0xaa, 0x1a, 0x1f, - 0x40, 0x5b, 0xc8, 0xa6, 0x58, 0x87, 0xea, 0x96, 0x45, 0x31, 0x17, 0x81, 0x8e, 0xba, 0xa8, 0xf7, - 0x3f, 0xc9, 0x43, 0x3c, 0x80, 0xb3, 0x0d, 0x0f, 0x1c, 0x1e, 0xb8, 0xb1, 0x5e, 0xe9, 0x96, 0x7b, - 0xf5, 0x41, 0xdb, 0x3c, 0x99, 0x6d, 0x8e, 0x54, 0x99, 0x1c, 0x74, 0x18, 0x43, 0x85, 0x25, 0xd4, - 0xd5, 0xcb, 0x5d, 0xd4, 0x3b, 0x27, 0xf2, 0x6c, 0xbc, 0x87, 0x6a, 0x26, 0x4c, 0xcb, 0x91, 0xf0, - 0x98, 0x9c, 0x54, 0x23, 0xf2, 0x9c, 0x2e, 0xe0, 0x33, 0x7f, 0xc3, 0xa2, 0x58, 0x2f, 0x75, 0xcb, - 0xbd, 0x1a, 0xc9, 0x43, 0x6c, 0x41, 0xcd, 0x16, 0x81, 0xc3, 0xd3, 0xc5, 0x65, 0xc7, 0xfa, 0xa0, - 0x95, 0x6f, 0x90, 0xe2, 0x9a, 0xd3, 0x5d, 0x18, 0x91, 0x9f, 0x1a, 0xe3, 0x13, 0x82, 0xba, 0xc2, - 0x9a, 0x30, 0x2f, 0xa1, 0x78, 0x04, 0x8d, 0x6c, 0xb3, 0xb5, 0x93, 0x26, 0x62, 0x1d, 0x49, 0x8e, - 0x7b, 0xbf, 0xe6, 0x90, 0x97, 0xc8, 0xc5, 0xe6, 0x28, 0x8a, 0xf1, 0x1c, 0x2e, 0xe9, 0xad, 0xc3, - 0x93, 0xb5, 0x2d, 0x82, 0x77, 0xfc, 
0xd0, 0xa8, 0x24, 0x1b, 0x3d, 0x28, 0x34, 0x1a, 0xa6, 0xca, - 0xb1, 0x14, 0xaa, 0x66, 0x2d, 0x5a, 0xc8, 0xc4, 0xc6, 0x57, 0x04, 0xe7, 0xc7, 0x03, 0xf1, 0x33, - 0xd0, 0xa8, 0x9d, 0xe4, 0x1f, 0xa0, 0x31, 0x30, 0x7e, 0xb3, 0x9d, 0x39, 0x94, 0x4a, 0x92, 0xdd, - 0x38, 0x18, 0x5a, 0x3a, 0x32, 0xb4, 0x0d, 0x9a, 0x72, 0x50, 0x7a, 0x56, 0x23, 0x59, 0x74, 0x6a, - 0x67, 0xe5, 0x2f, 0xec, 0x7c, 0x0a, 0x9a, 0x1a, 0x87, 0xdb, 0x80, 0x87, 0xe3, 0x9b, 0xd9, 0xfc, - 0x7a, 0xfd, 0xe6, 0xfa, 0xf5, 0x62, 0x3a, 0x9e, 0xbd, 0x9c, 0x4d, 0x27, 0xcd, 0xff, 0x70, 0x15, - 0xca, 0xc3, 0xc9, 0xa4, 0x89, 0x30, 0x80, 0x46, 0xa6, 0x57, 0xf3, 0xe5, 0xb4, 0x59, 0x32, 0xbe, - 0x21, 0x68, 0x16, 0x8d, 0xc0, 0x2f, 0x0a, 0x90, 0x0f, 0xff, 0xe0, 0x5c, 0x11, 0x54, 0x87, 0x6a, - 0xcc, 0xa2, 0x2d, 0xb7, 0x73, 0xd6, 0x3c, 0xc4, 0x8f, 0xe0, 0x0e, 0xdb, 0x31, 0x3f, 0x4c, 0x98, - 0xb3, 0x3e, 0xe1, 0x6e, 0xe4, 0xe9, 0x2b, 0xc5, 0x7f, 0x17, 0xce, 0x3c, 0xe1, 0xae, 0x53, 0x54, - 0x89, 0x5f, 0x23, 0x55, 0x4f, 0xb8, 0x37, 0xfb, 0x90, 0xfd, 0x23, 0xe9, 0xe8, 0x23, 0x82, 0x96, - 0x2d, 0xfc, 0x53, 0x94, 0x51, 0xf6, 0x0b, 0x2e, 0xd2, 0x87, 0xb6, 0x40, 0x6f, 0x1f, 0x67, 0x55, - 0x57, 0x78, 0x34, 0x70, 0x4d, 0x11, 0xb9, 0x96, 0xcb, 0x02, 0xf9, 0x0c, 0x2d, 0x55, 0xa2, 0x21, - 0x8f, 0xb3, 0xa7, 0xfe, 0x9c, 0x53, 0xff, 0x3b, 0x42, 0x9f, 0x4b, 0x97, 0xaf, 0xd4, 0xad, 0xb1, - 0x27, 0x6e, 0x1d, 0x73, 0x46, 0x7d, 0x73, 0xd9, 0xff, 0x92, 0x67, 0x57, 0x32, 0xbb, 0x9a, 0x51, - 0x7f, 0xb5, 0xec, 0x6f, 0x34, 0xd9, 0xeb, 0xc9, 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x29, 0x86, - 0x8f, 0x3e, 0x35, 0x04, 0x00, 0x00, +var File_google_iam_v1_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_policy_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x6a, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x68, 0x0a, + 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, + 
0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, + 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, + 0x0c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, + 0x0a, 0x10, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x35, 0x0a, 0x06, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, + 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, + 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc +) + +func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) + }) + return file_google_iam_v1_policy_proto_rawDescData +} + +var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_iam_v1_policy_proto_goTypes = []interface{}{ + (BindingDelta_Action)(0), // 0: google.iam.v1.BindingDelta.Action + (AuditConfigDelta_Action)(0), // 1: google.iam.v1.AuditConfigDelta.Action + (*Policy)(nil), // 2: google.iam.v1.Policy + (*Binding)(nil), // 3: google.iam.v1.Binding + (*PolicyDelta)(nil), // 4: google.iam.v1.PolicyDelta + (*BindingDelta)(nil), // 5: google.iam.v1.BindingDelta + (*AuditConfigDelta)(nil), // 6: google.iam.v1.AuditConfigDelta + (*expr.Expr)(nil), // 7: google.type.Expr +} +var file_google_iam_v1_policy_proto_depIdxs = []int32{ + 3, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding + 7, // 1: google.iam.v1.Binding.condition:type_name -> google.type.Expr + 5, // 2: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta + 6, // 3: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta + 0, // 4: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action + 7, // 5: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr + 1, // 6: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_policy_proto_init() } +func file_google_iam_v1_policy_proto_init() { + if File_google_iam_v1_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Binding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PolicyDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BindingDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditConfigDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, + NumEnums: 2, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, + EnumInfos: file_google_iam_v1_policy_proto_enumTypes, + MessageInfos: file_google_iam_v1_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_policy_proto = out.File + file_google_iam_v1_policy_proto_rawDesc = nil + file_google_iam_v1_policy_proto_goTypes = nil + file_google_iam_v1_policy_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 9ff770b5..9034439a 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,27 +1,46 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/rpc/code.proto package code import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 -// The canonical error codes for Google APIs. +// The canonical error codes for gRPC APIs. // // // Sometimes multiple error codes may apply. Services should return @@ -156,7 +175,8 @@ const ( Code_INTERNAL Code = 13 // The service is currently unavailable. This is most likely a // transient condition, which can be corrected by retrying with - // a backoff. + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. // // See the guidelines above for deciding between `FAILED_PRECONDITION`, // `ABORTED`, and `UNAVAILABLE`. @@ -169,83 +189,153 @@ const ( Code_DATA_LOSS Code = 15 ) -var Code_name = map[int32]string{ - 0: "OK", - 1: "CANCELLED", - 2: "UNKNOWN", - 3: "INVALID_ARGUMENT", - 4: "DEADLINE_EXCEEDED", - 5: "NOT_FOUND", - 6: "ALREADY_EXISTS", - 7: "PERMISSION_DENIED", - 16: "UNAUTHENTICATED", - 8: "RESOURCE_EXHAUSTED", - 9: "FAILED_PRECONDITION", - 10: "ABORTED", - 11: "OUT_OF_RANGE", - 12: "UNIMPLEMENTED", - 13: "INTERNAL", - 14: "UNAVAILABLE", - 15: "DATA_LOSS", -} +// Enum value maps for Code. +var ( + Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + } + Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + } +) -var Code_value = map[string]int32{ - "OK": 0, - "CANCELLED": 1, - "UNKNOWN": 2, - "INVALID_ARGUMENT": 3, - "DEADLINE_EXCEEDED": 4, - "NOT_FOUND": 5, - "ALREADY_EXISTS": 6, - "PERMISSION_DENIED": 7, - "UNAUTHENTICATED": 16, - "RESOURCE_EXHAUSTED": 8, - "FAILED_PRECONDITION": 9, - "ABORTED": 10, - "OUT_OF_RANGE": 11, - "UNIMPLEMENTED": 12, - "INTERNAL": 13, - "UNAVAILABLE": 14, - "DATA_LOSS": 15, +func (x Code) Enum() *Code { + p := new(Code) + *p = x + return p } func (x Code) String() string { - return proto.EnumName(Code_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Code) Descriptor() protoreflect.EnumDescriptor { + return file_google_rpc_code_proto_enumTypes[0].Descriptor() +} + +func (Code) Type() protoreflect.EnumType { + return &file_google_rpc_code_proto_enumTypes[0] +} + +func (x Code) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use Code.Descriptor instead. 
func (Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_fe593a732623ccf0, []int{0} + return file_google_rpc_code_proto_rawDescGZIP(), []int{0} +} + +var File_google_rpc_code_proto protoreflect.FileDescriptor + +var file_google_rpc_code_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, + 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, + 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, + 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, + 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, + 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, + 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, + 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, + 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64, + 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterEnum("google.rpc.Code", Code_name, Code_value) +var ( + file_google_rpc_code_proto_rawDescOnce sync.Once + file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc +) + +func file_google_rpc_code_proto_rawDescGZIP() []byte { + file_google_rpc_code_proto_rawDescOnce.Do(func() { + file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData) + }) + return file_google_rpc_code_proto_rawDescData +} + +var file_google_rpc_code_proto_enumTypes = 
make([]protoimpl.EnumInfo, 1) +var file_google_rpc_code_proto_goTypes = []interface{}{ + (Code)(0), // 0: google.rpc.Code +} +var file_google_rpc_code_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_fe593a732623ccf0) } - -var fileDescriptor_fe593a732623ccf0 = []byte{ - // 362 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31, - 0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c, - 0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42, - 0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1, - 0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61, - 0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c, - 0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c, - 0x2d, 0x6b, 0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34, - 0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c, - 0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97, - 0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3, - 0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60, - 0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8, - 0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75, - 0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40, - 0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31, - 0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53, - 0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9, - 0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90, - 0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d, - 0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9, - 0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00, +func init() { file_google_rpc_code_proto_init() } +func file_google_rpc_code_proto_init() { + if File_google_rpc_code_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_code_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_code_proto_goTypes, + DependencyIndexes: file_google_rpc_code_proto_depIdxs, + EnumInfos: file_google_rpc_code_proto_enumTypes, + }.Build() + File_google_rpc_code_proto = out.File + file_google_rpc_code_proto_rawDesc = nil + file_google_rpc_code_proto_goTypes = nil + 
file_google_rpc_code_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 0b9907f8..5dfabd64 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,163 +1,206 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/rpc/status.proto package status import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. 
When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. // -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). type Status struct { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` } -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_24d244abaf643bfe, []int{0} +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Status proto.InternalMessageInfo +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } return 0 } -func (m *Status) GetMessage() string { - if m != nil { - return m.Message +func (x *Status) GetMessage() string { + if x != nil { + return x.Message } return "" } -func (m *Status) GetDetails() []*any.Any { - if m != nil { - return m.Details +func (x *Status) GetDetails() []*any.Any { + if x != nil { + return x.Details } return nil } -func init() { - proto.RegisterType((*Status)(nil), "google.rpc.Status") +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } - -var fileDescriptor_24d244abaf643bfe = []byte{ - // 209 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, - 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, - 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, - 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, - 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, - 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, - 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, - 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, - 
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, - 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, - 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, - 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, - 0x00, +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*any.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 27d46fe0..4d9a47dc 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -1,25 +1,45 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/type/expr.proto package expr import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Represents an expression text. Example: // @@ -27,6 +47,10 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // description: "Determines whether the request has a user account" // expression: "size(request.user) > 0" type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Textual representation of an expression in // Common Expression Language syntax. // @@ -42,84 +66,150 @@ type Expr struct { Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // An optional string indicating the location of the expression for error // reporting, e.g. a file name and a position in the file. 
- Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` } -func (m *Expr) Reset() { *m = Expr{} } -func (m *Expr) String() string { return proto.CompactTextString(m) } -func (*Expr) ProtoMessage() {} -func (*Expr) Descriptor() ([]byte, []int) { - return fileDescriptor_d7920f1ae7a2722f, []int{0} +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_expr_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Expr) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Expr.Unmarshal(m, b) -} -func (m *Expr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Expr.Marshal(b, m, deterministic) +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Expr) XXX_Merge(src proto.Message) { - xxx_messageInfo_Expr.Merge(m, src) -} -func (m *Expr) XXX_Size() int { - return xxx_messageInfo_Expr.Size(m) -} -func (m *Expr) XXX_DiscardUnknown() { - xxx_messageInfo_Expr.DiscardUnknown(m) + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_google_type_expr_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Expr proto.InternalMessageInfo +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. +func (*Expr) Descriptor() ([]byte, []int) { + return file_google_type_expr_proto_rawDescGZIP(), []int{0} +} -func (m *Expr) GetExpression() string { - if m != nil { - return m.Expression +func (x *Expr) GetExpression() string { + if x != nil { + return x.Expression } return "" } -func (m *Expr) GetTitle() string { - if m != nil { - return m.Title +func (x *Expr) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *Expr) GetDescription() string { - if m != nil { - return m.Description +func (x *Expr) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Expr) GetLocation() string { - if m != nil { - return m.Location +func (x *Expr) GetLocation() string { + if x != nil { + return x.Location } return "" } -func init() { - proto.RegisterType((*Expr)(nil), "google.type.Expr") +var File_google_type_expr_proto protoreflect.FileDescriptor + +var file_google_type_expr_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x1e, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, + 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x5a, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x45, 0x78, 0x70, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/type/expr.proto", fileDescriptor_d7920f1ae7a2722f) } - -var fileDescriptor_d7920f1ae7a2722f = []byte{ - // 195 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0x4f, 0xad, 0x28, 0x28, 0xd2, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x88, 0xeb, 0x81, 0xc4, 0x95, 0xaa, 0xb8, 0x58, 0x5c, 0x2b, 0x0a, - 0x8a, 0x84, 0xe4, 0xb8, 0xb8, 0x40, 0x4a, 0x52, 0x8b, 0x8b, 0x33, 0xf3, 0xf3, 0x24, 0x18, 0x15, - 0x18, 0x35, 0x38, 0x83, 0x90, 0x44, 0x84, 0x44, 0xb8, 0x58, 0x4b, 0x32, 0x4b, 0x72, 0x52, 0x25, - 0x98, 0xc0, 0x52, 0x10, 0x8e, 0x90, 0x02, 0x17, 0x77, 0x4a, 0x6a, 0x71, 0x72, 0x51, 0x66, 0x41, - 0x09, 0x48, 0x1b, 0x33, 0x58, 0x0e, 0x59, 0x48, 0x48, 0x8a, 0x8b, 0x23, 0x27, 0x3f, 0x39, 0x11, - 0x2c, 0xcd, 0x02, 0x96, 0x86, 0xf3, 0x9d, 0xa2, 0xb8, 0xf8, 0x93, 0xf3, 0x73, 0xf5, 0x90, 0x9c, - 0xe3, 0xc4, 0x09, 0x72, 0x4c, 0x00, 0xc8, 0x99, 0x01, 0x8c, 0x51, 0x26, 0x50, 0x99, 0xf4, 0xfc, - 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x27, 0xf4, 0x21, - 0x52, 0x89, 0x05, 0x99, 0xc5, 0x08, 0xff, 0x59, 0x83, 0x88, 0x45, 0x4c, 0xcc, 0xee, 0x21, 0x01, - 0x49, 0x6c, 0x60, 0x65, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x67, 0x9e, 0xf5, 0x05, - 0x01, 0x00, 0x00, +var ( + file_google_type_expr_proto_rawDescOnce sync.Once + file_google_type_expr_proto_rawDescData = file_google_type_expr_proto_rawDesc +) + +func file_google_type_expr_proto_rawDescGZIP() []byte { + file_google_type_expr_proto_rawDescOnce.Do(func() { + file_google_type_expr_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_expr_proto_rawDescData) + }) + return file_google_type_expr_proto_rawDescData +} + +var file_google_type_expr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_expr_proto_goTypes = []interface{}{ + (*Expr)(nil), // 0: google.type.Expr +} +var file_google_type_expr_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_expr_proto_init() } +func file_google_type_expr_proto_init() { + if File_google_type_expr_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_expr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + 
File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_expr_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_expr_proto_goTypes, + DependencyIndexes: file_google_type_expr_proto_depIdxs, + MessageInfos: file_google_type_expr_proto_msgTypes, + }.Build() + File_google_type_expr_proto = out.File + file_google_type_expr_proto_rawDesc = nil + file_google_type_expr_proto_goTypes = nil + file_google_type_expr_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index a11e8cbc..9097478f 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -2,22 +2,22 @@ language: go matrix: include: - - go: 1.13.x + - go: 1.14.x env: VET=1 GO111MODULE=on - - go: 1.13.x + - go: 1.14.x env: RACE=1 GO111MODULE=on - - go: 1.13.x + - go: 1.14.x env: RUN386=1 - - go: 1.13.x + - go: 1.14.x env: GRPC_GO_RETRY=on - - go: 1.13.x + - go: 1.14.x env: TESTEXTRAS=1 + - go: 1.13.x + env: GO111MODULE=on - go: 1.12.x env: GO111MODULE=on - - go: 1.11.x + - go: 1.11.x # Keep until interop tests no longer require Go1.11 env: GO111MODULE=on - - go: 1.9.x - env: GAE=1 go_import_path: google.golang.org/grpc @@ -30,13 +30,11 @@ before_install: install: - try3() { eval "$*" || eval "$*" || eval "$*"; } - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi script: - set -e - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - make test diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 4f1567e2..cd03f8c7 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -57,6 +57,5 @@ How to get your contributions merged smoothly and quickly. - `make vet` to catch vet errors - `make test` to run the tests - `make testrace` to run tests in race mode - - optional `make testappengine` to run tests with appengine - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 410f7d56..cf474ae2 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -21,12 +21,7 @@ test: testdeps testsubmodule: testdeps cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... - -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... testdeps: go get -d -v -t google.golang.org/grpc/... 
@@ -53,8 +48,6 @@ vetdeps: deps \ proto \ test \ - testappengine \ - testappenginedeps \ testdeps \ testrace \ updatedeps \ diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db..3949a683 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,64 +1,53 @@ # gRPC-Go [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) -[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open -source, general RPC framework that puts mobile and HTTP/2 first. For more -information see the [gRPC Quick Start: -Go](https://grpc.io/docs/quickstart/go.html) guide. +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. -Installation ------------- +## Prerequisites -To install this package, you need to install Go and setup your Go workspace on -your computer. The simplest way to install the library is to run: +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. -``` -$ go get -u google.golang.org/grpc +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" ``` -With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in -your source code and `go [build|run|test]` will automatically download the -necessary dependencies ([Go modules -ref](https://github.com/golang/go/wiki/Modules)). +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. -If you are trying to access grpc-go from within China, please see the -[FAQ](#FAQ) below. +Otherwise, to install the `grpc-go` package, run the following command: -Prerequisites -------------- -gRPC-Go requires Go 1.9 or later. +```console +$ go get -u google.golang.org/grpc +``` -Documentation -------------- -- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API - descriptions. -- Documentation on specific topics can be found in the [Documentation - directory](Documentation/). -- Examples can be found in the [examples directory](examples/). +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. -Performance ------------ -Performance benchmark data for grpc-go and other languages is maintained in -[this -dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). +## Learn more -Status ------- -General Availability [Google Cloud Platform Launch -Stages](https://cloud.google.com/terms/launch-stages). +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) -FAQ ---- +## FAQ -#### I/O Timeout Errors +### I/O Timeout Errors -The `golang.org` domain may be blocked from some countries. `go get` usually +The `golang.org` domain may be blocked from some countries. 
`go get` usually produces an error like the following when this happens: -``` +```console $ go get -u google.golang.org/grpc package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) ``` @@ -69,7 +58,7 @@ To build Go code, there are several options: - Without Go module support: `git clone` the repo manually: - ``` + ```sh git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc ``` @@ -79,7 +68,7 @@ To build Go code, there are several options: - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: - ``` + ```sh go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest go mod tidy go mod vendor @@ -87,35 +76,66 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. Please refer to [this - issue](https://github.com/golang/go/issues/28652) in the golang repo regarding - this concern. + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). -#### Compiling error, undefined: grpc.SupportPackageIsVersion +### Compiling error, undefined: grpc.SupportPackageIsVersion -Please update proto package, gRPC package and rebuild the proto files: - - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - - `go get -u google.golang.org/grpc` - - `protoc --go_out=plugins=grpc:. *.proto` +#### If you are using Go modules: -#### How to turn on logging +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: -The default logger is controlled by the environment variables. Turn everything -on by setting: +```go +module +require ( + google.golang.org/grpc v1.27.0 +) ``` -GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. *.proto +``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info ``` -#### The RPC failed with error `"code = Unavailable desc = transport is closing"` +### The RPC failed with error `"code = Unavailable desc = transport is closing"` This error means the connection the RPC is using was closed, and there are many possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. 
It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on logging on __both client and server__, and see if there are any transport errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 68ffc620..ee5c51e6 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -50,6 +50,9 @@ func New(kvs ...interface{}) *Attributes { // times, the last value overwrites all previous values for that key. To // remove an existing key, use a nil value. func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if a == nil { + return New(kvs...) + } if len(kvs)%2 != 0 { panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) } @@ -66,5 +69,8 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } return a.m[key] } diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index a8eb0f47..00000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" -) - -// Address represents a server the client connects to. -// -// Deprecated: please use package balancer. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerConfig specifies the configurations for Balancer. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerConfig struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. 
- DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// BalancerGetOptions configures a Get call. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string, config BalancerConfig) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage of the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. 
- Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -// -// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Errorln("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - select { - case <-rr.addrCh: - default: - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string, config BalancerConfig) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address, 1) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. -func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. 
-func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = status.Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). -type pickFirst struct { - *roundRobin -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 9258858e..8bf359db 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -111,6 +111,9 @@ type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created // SubConn. If it's nil, the original creds from grpc DialOptions will be // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. CredsBundle credentials.Bundle // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn @@ -123,7 +126,7 @@ type State struct { // determine the state of the ClientConn. 
ConnectivityState connectivity.State // Picker is used to choose connections (SubConns) for RPCs. - Picker V2Picker + Picker Picker } // ClientConn represents a gRPC ClientConn. @@ -141,20 +144,11 @@ type ClientConn interface { // The SubConn will be shutdown. RemoveSubConn(SubConn) - // UpdateBalancerState is called by balancer to notify gRPC that some internal - // state in balancer has changed. - // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - // - // Deprecated: use UpdateState instead - UpdateBalancerState(s connectivity.State, p Picker) - // UpdateState notifies gRPC that the balancer's internal state has // changed. // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConns. + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. UpdateState(State) // ResolveNow is called by balancer to notify gRPC to do a name resolving. @@ -232,55 +226,16 @@ type DoneInfo struct { var ( // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + // gRPC will block the RPC until a new picker is available via UpdateState(). ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) -) - -// Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot every time its -// internal state has changed. -// -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -// -// Deprecated: use V2Picker instead -type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). - // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). - // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure or implements IsTransientFailure() - // bool, returning true: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with the error's status code, or Unknown if it is - // not a status error. - // - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. 
If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. - Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) -} + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) // PickResult contains information related to a connection chosen for an RPC. type PickResult struct { @@ -297,24 +252,19 @@ type PickResult struct { Done func(DoneInfo) } -type transientFailureError struct { - error -} - -func (e *transientFailureError) IsTransientFailure() bool { return true } - -// TransientFailureError wraps err in an error implementing -// IsTransientFailure() bool, returning true. -func TransientFailureError(err error) error { - return &transientFailureError{error: err} -} +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } -// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. // -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -type V2Picker interface { +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { // Pick returns the connection to use for this RPC and related information. // // Pick should not block. If the balancer needs to do I/O or any blocking @@ -327,14 +277,13 @@ type V2Picker interface { // - If the error is ErrNoSubConnAvailable, gRPC will block until a new // Picker is provided by the balancer (using ClientConn.UpdateState). // - // - If the error implements IsTransientFailure() bool, returning true, - // wait for ready RPCs will wait, but non-wait for ready RPCs will be - // terminated with this error's Error() string and status code - // Unavailable. + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. // - // - Any other errors terminate all RPCs with the code and message - // provided. If the error is not a status error, it will be converted by - // gRPC to a status error with code Unknown. + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. Pick(info PickInfo) (PickResult, error) } @@ -343,29 +292,21 @@ type V2Picker interface { // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. // -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. 
- // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateClientConnState will be called instead. - HandleResolvedAddrs([]resolver.Address, error) + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() @@ -393,27 +334,6 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateClientConnState method will be called -// instead of HandleResolvedAddrs and its UpdateSubConnState will be called -// instead of HandleSubConnStateChange. -type V2Balancer interface { - // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. If the error returned is ErrBadResolverState, the ClientConn - // will begin calling ResolveNow on the active name resolver with - // exponential backoff until a subsequent call to UpdateClientConnState - // returns a nil error. Any other errors are currently ignored. - UpdateClientConnState(ClientConnState) error - // ResolverError is called by gRPC when the name resolver reports an error. - ResolverError(error) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} - // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. 
// diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d7d72918..32d782f1 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -19,7 +19,6 @@ package base import ( - "context" "errors" "fmt" @@ -29,18 +28,18 @@ import ( "google.golang.org/grpc/resolver" ) +var logger = grpclog.Component("balancer") + type baseBuilder struct { - name string - pickerBuilder PickerBuilder - v2PickerBuilder V2PickerBuilder - config Config + name string + pickerBuilder PickerBuilder + config Config } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ - cc: cc, - pickerBuilder: bb.pickerBuilder, - v2PickerBuilder: bb.v2PickerBuilder, + cc: cc, + pickerBuilder: bb.pickerBuilder, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), @@ -50,11 +49,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we // may call UpdateState with this picker. - if bb.pickerBuilder != nil { - bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) - } else { - bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) - } + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) return bal } @@ -62,12 +57,9 @@ func (bb *baseBuilder) Name() string { return bb.name } -var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer - type baseBalancer struct { - cc balancer.ClientConn - pickerBuilder PickerBuilder - v2PickerBuilder V2PickerBuilder + cc balancer.ClientConn + pickerBuilder PickerBuilder csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State @@ -75,47 +67,34 @@ type baseBalancer struct { subConns map[resolver.Address]balancer.SubConn scStates map[balancer.SubConn]connectivity.State picker balancer.Picker - v2Picker balancer.V2Picker config Config resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure } -func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") -} - func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err if len(b.subConns) == 0 { b.state = connectivity.TransientFailure } + if b.state != connectivity.TransientFailure { // The picker will not change since the balancer does not currently // report an error. return } b.regeneratePicker() - if b.picker != nil { - b.cc.UpdateBalancerState(b.state, b.picker) - } else { - b.cc.UpdateState(balancer.State{ - ConnectivityState: b.state, - Picker: b.v2Picker, - }) - } + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) } func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - // TODO: handle s.ResolverState.Err (log if not nil) once implemented. // TODO: handle s.ResolverState.ServiceConfig? 
- if grpclog.V(2) { - grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) - } - if len(s.ResolverState.Addresses) == 0 { - b.ResolverError(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) } // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil @@ -127,7 +106,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // a is a new address (not existing in b.subConns). sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { - grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } b.subConns[a] = sc @@ -141,9 +120,17 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.cc.RemoveSubConn(sc) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. + // The entry will be deleted in UpdateSubConnState. } } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } return nil } @@ -167,52 +154,38 @@ func (b *baseBalancer) mergeErrors() error { // - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { - if b.pickerBuilder != nil { - b.picker = NewErrPicker(balancer.ErrTransientFailure) - } else { - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) - } + b.picker = NewErrPicker(b.mergeErrors()) return } - if b.pickerBuilder != nil { - readySCs := make(map[resolver.Address]balancer.SubConn) - - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc - } - } - b.picker = b.pickerBuilder.Build(readySCs) - } else { - readySCs := make(map[balancer.SubConn]SubConnInfo) + readySCs := make(map[balancer.SubConn]SubConnInfo) - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} - } + // Filter out all ready SCs from full subConn map. 
+ for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } - b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } -} - -func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) } oldS, ok := b.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } b.scStates[sc] = s switch s { case connectivity.Idle: @@ -221,29 +194,23 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError } - oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) - // Set or clear the last connection error accordingly. - b.connErr = state.ConnectionError - // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.state == connectivity.TransientFailure { b.regeneratePicker() } - if b.picker != nil { - b.cc.UpdateBalancerState(b.state, b.picker) - } else { - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) - } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } // Close is a nop because base balancer doesn't have internal state to clean up, @@ -251,28 +218,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } -// NewErrPicker returns a picker that always returns err on Pick(). +// NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} } -type errPicker struct { - err error // Pick() always returns this err. 
-} - -func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err -} - -// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). -func NewErrPickerV2(err error) balancer.V2Picker { - return &errPickerV2{err: err} -} +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker -type errPickerV2 struct { +type errPicker struct { err error // Pick() always returns this err. } -func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 4192918b..e31d76e3 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -37,15 +37,8 @@ import ( // PickerBuilder creates balancer.Picker. type PickerBuilder interface { - // Build takes a slice of ready SubConns, and returns a picker that will be - // used by gRPC to pick a SubConn. - Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker -} - -// V2PickerBuilder creates balancer.V2Picker. -type V2PickerBuilder interface { // Build returns a picker that will be used by gRPC to pick a SubConn. - Build(info PickerBuildInfo) balancer.V2Picker + Build(info PickerBuildInfo) balancer.Picker } // PickerBuildInfo contains information needed by the picker builder to @@ -62,32 +55,17 @@ type SubConnInfo struct { Address resolver.Address // the address used to create this SubConn } -// NewBalancerBuilder returns a balancer builder. The balancers -// built by this builder will use the picker builder to build pickers. -func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) -} - // Config contains the config info about the base balancer builder. type Config struct { // HealthCheck indicates whether health checking should be enabled for this specific balancer. HealthCheck bool } -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. -func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, config: config, } } - -// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. -func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { - return &baseBuilder{ - name: name, - v2PickerBuilder: pb, - config: config, - } -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 00000000..a24264a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValues(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index d4d64550..43c2a153 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -33,9 +33,11 @@ import ( // Name is the name of round_robin balancer. const Name = "round_robin" +var logger = grpclog.Component("roundrobin") + // newBuilder creates a new roundrobin balancer builder. 
func newBuilder() balancer.Builder { - return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { @@ -44,10 +46,10 @@ func init() { type rrPickerBuilder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { - grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: newPicker called with info: %v", info) if len(info.ReadySCs) == 0 { - return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } var scs []balancer.SubConn for sc := range info.ReadySCs { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 824f28e7..11e592aa 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -24,8 +24,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) @@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() { } ccb.balancerMu.Lock() su := t.(*scStateUpdate) - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - } else { - ccb.balancer.HandleSubConnStateChange(su.sc, su.state) - } + ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) ccb.balancerMu.Unlock() case <-ccb.done.Done(): } @@ -123,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - return ub.UpdateClientConnState(*ccs) - } - ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) - return nil + return ccb.balancer.UpdateClientConnState(*ccs) } func (ccb *ccBalancerWrapper) resolverError(err error) { - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ccb.balancerMu.Lock() - ub.ResolverError(err) - ccb.balancerMu.Unlock() - } + ccb.balancerMu.Lock() + ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -173,21 +163,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. 
- ccb.cc.blockingpicker.updatePicker(p) - ccb.cc.csMgr.updateState(s) -} - func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() @@ -199,7 +174,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.blockingpicker.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } @@ -245,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index db04b08b..00000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,334 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. -} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - targetAddr: opts.Target.Endpoint, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - targetAddr string // Target without the scheme. - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. 
- // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} - - // To aggregate the connectivity state. - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. -func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. - a := resolver.Address{ - Addr: bw.targetAddr, - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.targetAddr}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - oldSC.UpdateAddresses(newAddrs) - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. - del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.RecordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) - if s == connectivity.Shutdown { - // Remove state for this sc. - delete(bw.connSt, sc) - } -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() -} - -// The picker is the balancerWrapper itself. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { - failfast := true // Default failfast is true. - if ss, ok := rpcInfoFromContext(info.Ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return balancer.PickResult{}, toRPCErr(err) - } - if p != nil { - result.Done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() - } - - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. - for _, result.SubConn = range bw.conns { - return result, nil - } - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - var ok1 bool - result.SubConn, ok1 = bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[result.SubConn] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return result, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return balancer.PickResult{}, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. 
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index f393bb66..f826ec76 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -1,13 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto +// source: grpc/binlog/v1/binarylog.proto -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +package grpc_binarylog_v1 -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +20,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Enumerates the type of event // Note the terminology is different from the RPC semantics @@ -64,6 +66,7 @@ var GrpcLogEntry_EventType_name = map[int32]string{ 6: "EVENT_TYPE_SERVER_TRAILER", 7: "EVENT_TYPE_CANCEL", } + var GrpcLogEntry_EventType_value = map[string]int32{ "EVENT_TYPE_UNKNOWN": 0, "EVENT_TYPE_CLIENT_HEADER": 1, @@ -78,8 +81,9 @@ var GrpcLogEntry_EventType_value = map[string]int32{ func (x GrpcLogEntry_EventType) String() string { return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) } + func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} + return fileDescriptor_b7972e58de45083a, []int{0, 0} } // Enumerates the entity that generates the log entry @@ -96,6 +100,7 @@ var GrpcLogEntry_Logger_name = map[int32]string{ 1: "LOGGER_CLIENT", 2: "LOGGER_SERVER", } + var GrpcLogEntry_Logger_value = map[string]int32{ "LOGGER_UNKNOWN": 0, "LOGGER_CLIENT": 1, @@ -105,8 +110,9 @@ var GrpcLogEntry_Logger_value = map[string]int32{ func (x GrpcLogEntry_Logger) String() string { return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) } + func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} + return fileDescriptor_b7972e58de45083a, []int{0, 1} } type Address_Type int32 @@ -128,6 +134,7 @@ var Address_Type_name = map[int32]string{ 2: "TYPE_IPV6", 3: "TYPE_UNIX", } + var Address_Type_value = map[string]int32{ "TYPE_UNKNOWN": 0, "TYPE_IPV4": 1, @@ -138,8 +145,9 @@ var Address_Type_value = map[string]int32{ func (x Address_Type) String() string { return proto.EnumName(Address_Type_name, int32(x)) } + func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} + return fileDescriptor_b7972e58de45083a, []int{7, 0} } // Log entry we store in binary logs @@ -185,16 +193,17 @@ func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } func (m 
*GrpcLogEntry) String() string { return proto.CompactTextString(m) } func (*GrpcLogEntry) ProtoMessage() {} func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} + return fileDescriptor_b7972e58de45083a, []int{0} } + func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) } func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) } -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +func (m *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(m, src) } func (m *GrpcLogEntry) XXX_Size() int { return xxx_messageInfo_GrpcLogEntry.Size(m) @@ -317,9 +326,9 @@ func (m *GrpcLogEntry) GetPeer() *Address { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. +func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), @@ -327,108 +336,6 @@ func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) } } -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil -} - -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, err - 
default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type ClientHeader struct { // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` @@ -453,16 +360,17 @@ func (m *ClientHeader) Reset() { *m = ClientHeader{} } func (m *ClientHeader) String() string { return proto.CompactTextString(m) } func (*ClientHeader) ProtoMessage() {} func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} + return fileDescriptor_b7972e58de45083a, []int{1} } + func (m *ClientHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientHeader.Unmarshal(m, b) } func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) } -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) +func (m *ClientHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientHeader.Merge(m, src) } func (m *ClientHeader) XXX_Size() int { return xxx_messageInfo_ClientHeader.Size(m) @@ -513,16 +421,17 @@ func (m *ServerHeader) Reset() { *m = ServerHeader{} } func (m *ServerHeader) String() string { return proto.CompactTextString(m) } func (*ServerHeader) ProtoMessage() {} func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} + return fileDescriptor_b7972e58de45083a, []int{2} } + func (m *ServerHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerHeader.Unmarshal(m, b) } func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) } -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) +func (m *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(m, src) } func (m *ServerHeader) XXX_Size() int { return xxx_messageInfo_ServerHeader.Size(m) @@ -560,16 +469,17 @@ func (m *Trailer) Reset() { *m = Trailer{} } func (m *Trailer) String() string { return proto.CompactTextString(m) } func (*Trailer) ProtoMessage() {} func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} + return fileDescriptor_b7972e58de45083a, []int{3} } + func (m *Trailer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Trailer.Unmarshal(m, b) } func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) } -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) +func (m *Trailer) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Trailer.Merge(m, src) } func (m *Trailer) XXX_Size() int { return xxx_messageInfo_Trailer.Size(m) @@ -624,16 +534,17 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} + return fileDescriptor_b7972e58de45083a, []int{4} } + func (m *Message) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Message.Unmarshal(m, b) } func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Message.Marshal(b, m, deterministic) } -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) } func (m *Message) XXX_Size() int { return xxx_messageInfo_Message.Size(m) @@ -690,16 +601,17 @@ func (m *Metadata) Reset() { *m = Metadata{} } func (m *Metadata) String() string { return proto.CompactTextString(m) } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} + return fileDescriptor_b7972e58de45083a, []int{5} } + func (m *Metadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metadata.Unmarshal(m, b) } func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) } func (m *Metadata) XXX_Size() int { return xxx_messageInfo_Metadata.Size(m) @@ -730,16 +642,17 @@ func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } func (*MetadataEntry) ProtoMessage() {} func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} + return fileDescriptor_b7972e58de45083a, []int{6} } + func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) } func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) } -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) +func (m *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(m, src) } func (m *MetadataEntry) XXX_Size() int { return xxx_messageInfo_MetadataEntry.Size(m) @@ -779,16 +692,17 @@ func (m *Address) Reset() { *m = Address{} } func (m *Address) String() string { return proto.CompactTextString(m) } func (*Address) ProtoMessage() {} func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} + return fileDescriptor_b7972e58de45083a, []int{7} } + func (m *Address) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address.Unmarshal(m, b) } func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address.Marshal(b, m, deterministic) } -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) +func (m *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(m, src) } func (m *Address) XXX_Size() int { return 
xxx_messageInfo_Address.Size(m) @@ -821,6 +735,9 @@ func (m *Address) GetIpPort() uint32 { } func init() { + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") @@ -829,72 +746,67 @@ func init() { proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) } -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) -} +func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) } -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto +var fileDescriptor_b7972e58de45083a = []byte{ + // 904 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 
0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 
0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, + 0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09, + 0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda, + 0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59, + 0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c, + 0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8, + 0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52, + 0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2, + 0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0, + 0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22, + 0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54, + 0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03, + 0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f, + 0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee, + 0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1, + 0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d, + 0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94, + 0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf, + 0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9, + 0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca, + 0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0, + 0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c, + 0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c, + 0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a, + 0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e, + 0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd, + 0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1, + 0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe, + 0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1, + 0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05, + 0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71, + 0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28, + 0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37, + 0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a, + 0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38, + 0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64, + 0x74, 0x06, 0xd5, 0x05, 0x15, 
0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a, + 0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15, + 0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b, + 0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b, + 0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f, + 0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55, + 0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72, + 0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79, + 0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41, + 0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f, + 0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95, + 0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32, + 0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a, + 0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10, + 0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4, + 0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b, + 0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80, + 0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed, + 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3, + 0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f58740b2..ae5ce494 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -35,10 +35,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -68,8 +68,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. 
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -151,7 +149,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -161,10 +159,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + channelz.Info(logger, cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -197,12 +192,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = newProxyDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - }, - ) + cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + } + if cc.dopts.withProxy { + cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) + } } if cc.dopts.copts.UserAgent != "" { @@ -219,7 +215,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * defer func() { select { case <-ctx.Done(): - conn, err = nil, ctx.Err() + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } default: } }() @@ -241,14 +244,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + cc.parsedTarget = grpcutil.ParseTarget(cc.target) + unixScheme := strings.HasPrefix(cc.target, "unix:") + channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) if resolverBuilder == nil { // If resolver builder is still nil, the parsed target's scheme is // not registered. Fallback to default resolver and set Endpoint to // the original target. - grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) cc.parsedTarget = resolver.Target{ Scheme: resolver.GetDefaultScheme(), Endpoint: target, @@ -264,6 +268,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.authority = creds.Info().ServerName } else if cc.dopts.insecure && cc.dopts.authority != "" { cc.authority = cc.dopts.authority + } else if unixScheme { + cc.authority = "localhost" } else { // Use endpoint from "scheme://authority/endpoint" as the default // authority for ClientConn. 
@@ -313,7 +319,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if s == connectivity.Ready { break } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { + if err = cc.connectionError(); err != nil { terr, ok := err.(interface { Temporary() bool }) @@ -324,6 +330,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } return nil, ctx.Err() } } @@ -416,12 +425,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -497,6 +501,9 @@ type ClientConn struct { channelzID int64 // channelz unique identification number czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -671,9 +678,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - grpclog.Infof("ClientConn switching balancer to %q", name) + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -681,22 +688,12 @@ func (cc *ClientConn) switchBalancer(name string) { } builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() @@ -720,6 +717,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // Caller needs to make sure len(addrs) > 0. 
func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ + state: connectivity.Idle, cc: cc, addrs: addrs, scopts: opts, @@ -736,7 +734,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -834,7 +832,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -854,7 +852,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -865,9 +863,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. -// If there isn't an exact match for the input method, we look for the default config -// under the service (i.e /service/). If there is a default MethodConfig for -// the service, we return it. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. // Otherwise, we return an empty MethodConfig. func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. @@ -876,12 +875,14 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { if cc.sc == nil { return MethodConfig{} } - m, ok := cc.sc.Methods[method] - if !ok { - i := strings.LastIndex(method, "/") - m = cc.sc.Methods[method[:i+1]] + if m, ok := cc.sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := cc.sc.Methods[method[:i+1]]; ok { + return m } - return m + return cc.sc.Methods[""] } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -1025,7 +1026,7 @@ func (cc *ClientConn) Close() error { Severity: channelz.CtINFO, } } - channelz.AddTraceEvent(cc.channelzID, ted) + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. 
channelz.RemoveEntry(cc.channelzID) @@ -1068,15 +1069,8 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1213,12 +1207,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { @@ -1227,7 +1216,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T if firstConnErr == nil { firstConnErr = err } - ac.cc.blockingpicker.updateConnectionError(err) + ac.cc.updateConnectionError(err) } // Couldn't connect to any address. @@ -1242,16 +1231,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne onCloseCalled := make(chan struct{}) reconnect := grpcsync.NewEvent() - authority := ac.cc.authority // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName != "" { - authority = addr.ServerName - } - - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: authority, + if addr.ServerName == "" { + addr.ServerName = ac.cc.authority } once := sync.Once{} @@ -1297,10 +1279,10 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } @@ -1308,7 +1290,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1325,7 +1307,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // // LB channel health checking is enabled when all requirements below are met: // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption -// 2. 
internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 2. internal.HealthCheckFunc is set by importing the grpc/health package // 3. a service config with non-empty healthCheckConfig field is provided // 4. the load balancer requests it // @@ -1355,7 +1337,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. - grpclog.Error("Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1385,15 +1367,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1458,7 +1434,7 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -1560,9 +1536,21 @@ var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { - if cc.parsedTarget.Scheme == rb.Scheme() { + if scheme == rb.Scheme() { return rb } } - return resolver.Get(cc.parsedTarget.Scheme) + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError } diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 02738839..11b10618 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -33,6 +33,9 @@ const ( OK Code = 0 // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. Canceled Code = 1 // Unknown error. An example of where this error may be returned is @@ -40,12 +43,17 @@ const ( // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. 
It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. @@ -53,14 +61,21 @@ const ( // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. + // + // This error code will not be generated by the gRPC framework. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. + // + // This error code will not be generated by the gRPC framework. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to @@ -69,10 +84,17 @@ const ( // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the @@ -94,6 +116,8 @@ const ( // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a @@ -102,6 +126,8 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. @@ -119,15 +145,26 @@ const ( // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. + // + // This error code will not be generated by the gRPC framework. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. Internal Code = 13 // Unavailable indicates the service is currently unavailable. 
@@ -137,13 +174,22 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. Unauthenticated Code = 16 _maxCode = 17 diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 34ec36fb..0a8d682a 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -27,6 +27,8 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // State indicates the state of connectivity. // It can be the state of a ClientConn or SubConn. type State int @@ -44,7 +46,7 @@ func (s State) String() string { case Shutdown: return "SHUTDOWN" default: - grpclog.Errorf("unknown connectivity state: %d", s) + logger.Errorf("unknown connectivity state: %d", s) return "Invalid-State" } } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 845ce5d2..02766443 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -29,6 +29,7 @@ import ( "net" "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" "google.golang.org/grpc/internal" ) @@ -57,9 +58,11 @@ type PerRPCCredentials interface { type SecurityLevel int const ( - // NoSecurity indicates a connection is insecure. + // Invalid indicates an invalid security level. // The zero SecurityLevel value is invalid for backward compatibility. - NoSecurity SecurityLevel = iota + 1 + Invalid SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity // IntegrityOnly indicates a connection only provides integrity protection. IntegrityOnly // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. @@ -100,7 +103,11 @@ type ProtocolInfo struct { ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string - // SecurityVersion is the security protocol version. + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. ServerName string @@ -120,15 +127,18 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). 
type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // The auth information should embed CommonAuthInfo to return additional information about - // the credentials. Implementations must use the provided context to implement timely cancellation. - // gRPC will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). - // If the returned error is a wrapper error, implementations should make sure that + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) @@ -189,6 +199,31 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { return } +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + return chi +} + // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. // It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. @@ -204,7 +239,7 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { } if ci, ok := ri.AuthInfo.(internalInfo); ok { // CommonAuthInfo.SecurityLevel has an invalid value. 
- if ci.GetCommonAuthInfo().SecurityLevel == 0 { + if ci.GetCommonAuthInfo().SecurityLevel == Invalid { return nil } if ci.GetCommonAuthInfo().SecurityLevel < level { @@ -219,6 +254,9 @@ func init() { internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } + internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) + } } // ChannelzSecurityInfo defines the interface that security protocols should implement diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go deleted file mode 100644 index d4346e9e..00000000 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 28b4f623..48384f50 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -25,8 +25,9 @@ import ( "fmt" "io/ioutil" "net" + "net/url" - "google.golang.org/grpc/credentials/internal" + credinternal "google.golang.org/grpc/internal/credentials" ) // TLSInfo contains the auth information for a TLS authenticated connection. @@ -34,6 +35,8 @@ import ( type TLSInfo struct { State tls.ConnectionState CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL } // AuthType returns the type of TLSInfo as a string. 
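// Illustrative sketch (not part of this diff): the hunk above adds an
// experimental SPIFFEID field to credentials.TLSInfo. Assuming a TLS-secured
// connection, a server handler could read it from the peer's auth info as
// shown below; the package and function names here are hypothetical.
package example

import (
	"context"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

// peerSPIFFEID returns the caller's SPIFFE ID, if the updated TLS credentials
// attached one to the connection, or the empty string otherwise.
func peerSPIFFEID(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return ""
	}
	tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok || tlsInfo.SPIFFEID == nil {
		return ""
	}
	return tlsInfo.SPIFFEID.String() // e.g. "spiffe://example.org/workload"
}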
@@ -69,7 +72,7 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) + cfg := credinternal.CloneTLSConfig(c.config) if cfg.ServerName == "" { serverName, _, err := net.SplitHostPort(authority) if err != nil { @@ -94,7 +97,17 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { @@ -103,7 +116,17 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) Clone() TransportCredentials { @@ -115,36 +138,33 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps - } - } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc } -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. 
+// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { @@ -208,18 +228,3 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", } - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 63f5ae21..decb4c5e 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -46,18 +45,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool @@ -72,6 +71,7 @@ type dialOptions struct { // we need to be able to configure this in tests. resolveNowBackoff func(int) time.Duration resolvers []resolver.Builder + withProxy bool } // DialOption configures how we set up the connection. @@ -198,19 +198,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. Will be removed in a future 1.x release. 
-func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - }) -} - // WithBalancerName sets the balancer that the ClientConn will be initialized // with. Balancer registered with balancerName will be used. This function // panics if no balancer was registered by balancerName. @@ -298,6 +285,19 @@ func WithBlock() DialOption { }) } +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// This API is EXPERIMENTAL. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + // WithInsecure returns a DialOption which disables transport security for this // ClientConn. Note that transport security is required unless WithInsecure is // set. @@ -307,6 +307,16 @@ func WithInsecure() DialOption { }) } +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// This API is EXPERIMENTAL. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.withProxy = false + }) +} + // WithTransportCredentials returns a DialOption which configures a connection // level security credentials (e.g., TLS/SSL). This should not be used together // with WithCredentialsBundle. @@ -412,7 +422,7 @@ func WithUserAgent(s string) DialOption { // for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) kp.Time = internal.KeepaliveMinPingTime } return newFuncDialOption(func(o *dialOptions) { @@ -448,7 +458,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { } // WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, +// interceptor for streaming RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All interceptors added by this method will be chained, and the interceptor // defined by WithStreamInterceptor will always be prepended to the chain. @@ -557,6 +567,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, }, resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, + withProxy: true, } } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 187adbb1..0022859a 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,6 +16,8 @@ * */ +//go:generate ./regenerate.sh + /* Package grpc implements an RPC system called gRPC. 
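// Illustrative sketch (not part of this diff): dialing with the two new
// experimental options added in dialoptions.go above, WithReturnConnectionError
// and WithNoProxy. The target address and function name are hypothetical.
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
)

func dialWithNewOptions() (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// WithReturnConnectionError implies WithBlock, so DialContext waits for the
	// connection and, on timeout, reports the last transport error alongside
	// context.DeadlineExceeded. WithNoProxy skips any configured proxy.
	return grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),
		grpc.WithReturnConnectionError(),
		grpc.WithNoProxy(),
	)
}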
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index 23783613..31f2b01f 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -3,12 +3,11 @@ module google.golang.org/grpc go 1.11 require ( - github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 - github.com/envoyproxy/protoc-gen-validate v0.1.0 + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f + github.com/envoyproxy/go-control-plane v0.9.4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.2.0 + github.com/golang/protobuf v1.3.3 + github.com/google/go-cmp v0.4.0 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index dd5d0cee..be8078ea 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,10 +1,15 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -14,13 +19,18 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -40,7 +50,10 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -49,5 +62,7 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 00000000..8358dd6e --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 874ea6d9..c8bb2be3 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -26,64 +26,70 @@ // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. 
package grpclog // import "google.golang.org/grpc/grpclog" -import "os" +import ( + "os" -var logger = newLoggerV2() + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return logger.V(l) + return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...interface{}) { - logger.Warning(args...) + grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) + grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...interface{}) { - logger.Warningln(args...) + grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...interface{}) { - logger.Error(args...) + grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) + grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...interface{}) { - logger.Errorln(args...) + grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...interface{}) { - logger.Fatal(args...) + grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -91,7 +97,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) + grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. func Fatalln(args ...interface{}) { - logger.Fatalln(args...) + grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) { // // Deprecated: use Info. func Print(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) 
} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 097494f7..ef06a482 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,6 +18,8 @@ package grpclog +import "google.golang.org/grpc/internal/grpclog" + // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. @@ -35,7 +37,7 @@ type Logger interface { // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} + grpclog.Logger = &loggerWrapper{Logger: l} } // loggerWrapper wraps Logger into a LoggerV2. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index d4932577..8eba2d0e 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -24,6 +24,8 @@ import ( "log" "os" "strconv" + + "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. @@ -65,7 +67,11 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { - logger = l + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } const ( @@ -193,3 +199,20 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) { func (g *loggerT) V(l int) bool { return l <= g.v } + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index c99e27ae..4c2a527e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -166,11 +166,11 @@ var fileDescriptor_e265fd9d4e077217 = []byte{ // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // HealthClient is the client API for Health service. 
// @@ -198,10 +198,10 @@ type HealthClient interface { } type healthClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewHealthClient(cc *grpc.ClientConn) HealthClient { +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { return &healthClient{cc} } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 00000000..716f8c01 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,141 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// HealthService is the service API for Health service. +// Fields should be assigned to their respective handler implementations only before +// RegisterHealthService is called. Any unassigned fields will result in the +// handler for that method returning an Unimplemented error. +type HealthService struct { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check func(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. 
+ Watch func(*HealthCheckRequest, Health_WatchServer) error +} + +func (s *HealthService) check(_ interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + if s.Check == nil { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") + } + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return s.Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: s, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return s.Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} +func (s *HealthService) watch(_ interface{}, stream grpc.ServerStream) error { + if s.Watch == nil { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") + } + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return s.Watch(m, &healthWatchServer{stream}) +} + +// RegisterHealthService registers a service implementation with a gRPC server. +func RegisterHealthService(s grpc.ServiceRegistrar, srv *HealthService) { + sd := grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: srv.check, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: srv.watch, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", + } + + s.RegisterService(&sd, nil) +} + +// NewHealthService creates a new HealthService containing the +// implemented methods of the Health service in s. Any unimplemented +// methods will result in the gRPC server returning an UNIMPLEMENTED status to the client. +// This includes situations where the method handler is misspelled or has the wrong +// signature. For this reason, this function should be used with great care and +// is not recommended to be used by most users. +func NewHealthService(s interface{}) *HealthService { + ns := &HealthService{} + if h, ok := s.(interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + }); ok { + ns.Check = h.Check + } + if h, ok := s.(interface { + Watch(*HealthCheckRequest, Health_WatchServer) error + }); ok { + ns.Watch = h.Watch + } + return ns +} + +// UnstableHealthService is the service API for Health service. +// New methods may be added to this interface if they are added to the service +// definition, which is not a backward-compatible change. For this reason, +// use of this type is not recommended. +type UnstableHealthService interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. 
+ // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go new file mode 100644 index 00000000..83c6acf5 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/logging.go @@ -0,0 +1,23 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import "google.golang.org/grpc/grpclog" + +var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh deleted file mode 100644 index b11eccb2..00000000 --- a/vendor/google.golang.org/grpc/health/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/health/v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto -popd -rm -f grpc_health_v1/*.pb.go -cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/ - diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index 2262607f..cce6312d 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -16,8 +16,6 @@ * */ -//go:generate ./regenerate.sh - // Package health provides a service that exposes server's health and it must be // imported to enable support for client-side health checks. package health @@ -27,7 +25,6 @@ import ( "sync" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" @@ -35,6 +32,7 @@ import ( // Server implements `service Health`. 
type Server struct { + healthgrpc.UnimplementedHealthServer mu sync.RWMutex // If shutdown is true, it's expected all serving status is NOT_SERVING, and // will stay in NOT_SERVING. @@ -115,7 +113,7 @@ func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthC s.mu.Lock() defer s.mu.Unlock() if s.shutdown { - grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) + logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) return } diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 7c7bcada..00000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 8b105167..5cc3aedd 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -25,6 +25,7 @@ import ( "os" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" ) // Logger is the global binary logger. It can be used to get binary logger for @@ -39,6 +40,8 @@ type Logger interface { // It is used to get a methodLogger for each individual method. var binLogger Logger +var grpclogLogger = grpclog.Component("binarylog") + // SetLogger sets the binarg logger. // // Only call this at init time. @@ -146,9 +149,9 @@ func (l *logger) setBlacklist(method string) error { // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) + s, m, err := grpcutil.ParseMethod(methodName) if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } if ml, ok := l.methods[s+"/"+m]; ok { diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index be30d0e6..d8f4e760 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -24,8 +24,6 @@ import ( "regexp" "strconv" "strings" - - "google.golang.org/grpc/grpclog" ) // NewLoggerFromConfigString reads the string and build a logger. 
It can be used @@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger { methods := strings.Split(s, ",") for _, method := range methods { if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) + grpclogLogger.Warningf("failed to parse binary log config: %v", err) return nil } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 160f6e86..5e108353 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -27,7 +27,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -219,12 +218,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, @@ -259,12 +258,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, @@ -315,7 +314,7 @@ type ServerTrailer struct { func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") + grpclogLogger.Info("binarylogging: error in trailer is not a status error") } var ( detailsBytes []byte @@ -325,7 +324,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } ret := &pb.GrpcLogEntry{ diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh deleted file mode 100644 index 113d40cb..00000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ - diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index a2e7c346..835f5104 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -29,7 +29,6 @@ import ( "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" ) var ( @@ -78,7 +77,7 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go deleted file mode 100644 index 15dc7803..00000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "strings" -) - -// parseMethodName splits service and method from the input. It expects format -// "/service/method". -// -// TODO: move to internal/grpcutil. 
-func parseMethodName(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") - } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f99..81d3dd33 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") + logger.Error("a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") + logger.Error("a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") + logger.Error("a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,7 +294,19 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { +func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { + for d := desc; d != nil; d = d.Parent { + switch d.Severity { + case CtUNKNOWN: + l.InfoDepth(depth+1, d.Desc) + case CtINFO: + l.InfoDepth(depth+1, d.Desc) + case CtWarning: + l.WarningDepth(depth+1, d.Desc) + case CtError: + l.ErrorDepth(depth+1, d.Desc) + } + } if getMaxTraceEntry() == 0 { return } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 00000000..e94039ee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. 
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtINFO, + }) + } else { + l.InfoDepth(1, args...) + } +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtINFO, + }) + } else { + l.InfoDepth(1, msg) + } +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, args...) + } +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, msg) + } +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + l.ErrorDepth(1, args...) + } +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + l.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 17c2274c..075dc7d1 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" ) // entry represents a node in the channelz database. @@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) { // the addrConn will create a new transport. And when registering the new transport in // channelz, its parent addrConn could have already been torn down and deleted // from channelz tracking, and thus reach the code here. - grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) } func (d *dummyEntry) deleteChild(id int64) { // It is possible for a normal program to reach here under race condition. // Refer to the example described in addChild(). 
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) } func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) } func (*dummyEntry) deleteSelfIfReady() { @@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) { case *channel: c.nestedChans[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) } } @@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) { if v, ok := e.(*normalSocket); ok { sc.sockets[id] = v.refName } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) } } @@ -493,11 +492,11 @@ type listenSocket struct { } func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) } func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) } func (ls *listenSocket) triggerDelete() { @@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() { } func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") } func (ls *listenSocket) getParentID() int64 { @@ -522,11 +521,11 @@ type normalSocket struct { } func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) } func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) } func (ns *normalSocket) triggerDelete() { @@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() { } func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") } func (ns *normalSocket) getParentID() int64 { @@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) { case *listenSocket: s.listenSockets[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd618..1b1c4cce 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 79edbefc..da7351e9 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -22,8 +22,6 @@ package channelz import ( "sync" - - "google.golang.org/grpc/grpclog" ) var once sync.Once @@ -39,6 +37,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux os.") }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d5..49432beb 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,4 +1,4 @@ -// +build linux,!appengine +// +build linux /* * diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 00000000..40602303 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + var spiffeID *url.URL + for _, uri := range state.PeerCertificates[0].URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.RawPath) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. 
+ if len(state.PeerCertificates[0].URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go new file mode 100644 index 00000000..af6f5771 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go @@ -0,0 +1,31 @@ +// +build appengine + +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" + "net/url" +) + +// SPIFFEIDFromState is a no-op for appengine builds. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + return nil +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go similarity index 94% rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn.go rename to vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index 2f4472be..2919632d 100644 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -18,8 +16,7 @@ * */ -// Package internal contains credentials-internal code. -package internal +package credentials import ( "net" diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 00000000..55664fa4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. 
+func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index ae6c8972..73931a94 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -34,5 +34,5 @@ var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false") + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 00000000..745a166f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. 
+ Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 00000000..82af70e9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. 
+// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 00000000..b25b0bae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. 
+ if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 00000000..6f22bd89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 00000000..4e747506 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". 
+// +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 00000000..80b33cda --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. 
+// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func ParseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0912f0bf..818ca857 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -25,6 +25,7 @@ import ( "time" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" ) var ( @@ -37,17 +38,20 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // StatusRawProto is exported by status/status.go. This func returns a - // pointer to the wrapped Status proto for a given status.Status without a - // call to proto.Clone(). The returned Status proto should not be mutated by - // the caller. - StatusRawProto interface{} // func (*status.Status) *spb.Status // NewRequestInfoContext creates a new context based on the argument context attaching // the passed in RequestInfo to the new context. NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // NewClientHandshakeInfoContext returns a copy of the input context with + // the passed in ClientHandshakeInfo struct added to it. + NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index c368db62..30423556 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,6 +32,7 @@ import ( "sync" "time" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" @@ -43,6 +44,8 @@ import ( // addresses from SRV records. Must not be changed after init time. 
var EnableSRVLookups = false +var logger = grpclog.Component("dns") + func init() { resolver.Register(NewBuilder()) } @@ -251,7 +254,7 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } addr := ip + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) } } return newAddrs, nil @@ -271,7 +274,7 @@ func handleDNSError(err error, lookupType string) error { err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) - grpclog.Infoln(err) + logger.Info(err) } return err } @@ -294,7 +297,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) // This is not an error; it is the equivalent of not having a service config. return nil } @@ -326,13 +329,15 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } - state := &resolver.State{ - Addresses: append(addrs, srv...), + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { state.ServiceConfig = d.lookupTXT() } - return state, nil + return &state, nil } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. @@ -418,12 +423,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("dns: error parsing service config json: %v", err) + logger.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("dns: error getting client hostname: %v", err) + logger.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..af3e2b5f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. 
+package serviceconfig + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 00000000..710223b8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{e: s.Proto()} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. 
+func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} + +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.e, tse.e) +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 43281a3e..3743bd45 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -32,16 +30,18 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // GetCPUTime returns the how much CPU time has passed since the start of this process. func GetCPUTime() int64 { var ts unix.Timespec if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) + logger.Fatal(err) } return ts.Nano() } -// Rusage is an alias for syscall.Rusage under linux non-appengine environment. +// Rusage is an alias for syscall.Rusage under linux environment. type Rusage syscall.Rusage // GetRusage returns the resource usage of current process. diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index d3fd9dab..51159e63 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -18,6 +18,8 @@ * */ +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. package syscall import ( @@ -29,43 +31,44 @@ import ( ) var once sync.Once +var logger = grpclog.Component("core") func log() { once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environment.") }) } // GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// It always returns 0 under non-linux environment. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environment. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. +// GetRusage is a no-op function under non-linux environment. 
func GetRusage() (rusage *Rusage) { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environment. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments +// GetTCPUserTimeout is a no-op function under non-linux environments // a negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ddee20b6..40ef2392 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -505,7 +505,9 @@ func (l *loopyWriter) run() (err error) { // 1. When the connection is closed by some other known issue. // 2. User closed the connection. // 3. A graceful close of connection. - infof("transport: loopyWriter.run returning. %v", err) + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. %v", err) + } err = nil } }() @@ -605,7 +607,9 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } return nil } // Case 1.A: Server is responding back with headers. @@ -658,7 +662,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } } } var ( @@ -857,38 +863,45 @@ func (l *loopyWriter) processData() (bool, error) { return false, nil } var ( - idx int buf []byte ) - if len(dataItem.h) != 0 { // data header has not been written out yet. - buf = dataItem.h - } else { - idx = 1 - buf = dataItem.d - } - size := http2MaxFrameLen - if len(buf) < size { - size = len(buf) - } + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil - } else if strQuota < size { - size = strQuota + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. 
+ // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d } - if l.sendQuota < uint32(size) { // connection-level flow control. - size = int(l.sendQuota) - } + size := hSize + dSize + // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && size == len(buf) { - // buf contains either data or it contains header but data is empty. - if idx == 1 || len(dataItem.d) == 0 { - endStream = true - } + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() @@ -896,14 +909,10 @@ func (l *loopyWriter) processData() (bool, error) { if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { return false, err } - buf = buf[size:] str.bytesOutStanding += size l.sendQuota -= uint32(size) - if idx == 0 { - dataItem.h = buf - } else { - dataItem.d = buf - } + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. str.itl.dequeue() @@ -924,3 +933,10 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index c3c32daf..05d3871e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -57,7 +58,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before - contentSubtype, validContentType := contentSubtype(contentType) + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { return nil, errors.New("invalid gRPC request content-type") } @@ -112,11 +113,10 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration headerMD metadata.MD @@ -186,8 +186,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } // And flush, in case no header or body has been sent yet. 
// This forces a separation of headers and trailers if this is the @@ -227,6 +230,8 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if err == nil { // transport has not been closed if ht.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) @@ -236,14 +241,16 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro return err } +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -262,9 +269,30 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } } +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() return ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } ht.rw.Write(hdr) ht.rw.Write(data) ht.rw.(http.Flusher).Flush() @@ -272,27 +300,27 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - v = encodeMetadataHeader(k, v) - h.Add(k, v) - } + if !headersWritten { + ht.writePendingHeaders(s) } + ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) if err == nil { if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), + Header: md.Copy(), + Compression: s.sendCompress, }) } } @@ -338,7 +366,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 2d6feeb1..e7f23211 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -161,7 +163,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -214,12 +216,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the credential handshaker. This makes it possible for + // address specific arbitrary data to reach the credential handshaker. + contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) + connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } } dynamicWindow := true icwz := int32(initialWindowSize) @@ -345,7 +355,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() if err != nil { - errorf("transport: loopyWriter.run returning. 
Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } // If it's a connection error, let reader goroutine handle it // since there might be data in the buffers. @@ -425,7 +437,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.PreviousAttempts > 0 { @@ -440,7 +452,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := time.Until(dl) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) @@ -554,13 +566,26 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + // NewStream creates a stream and registers it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -680,14 +705,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } if t.statsHandler != nil { - header, _, _ := metadata.FromOutgoingContextRaw(ctx) + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
outHeader := &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, - Header: header.Copy(), + Header: header, } t.statsHandler.HandleRPC(s.ctx, outHeader) } @@ -847,18 +879,10 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e df := &dataFrame{ streamID: s.id, endStream: opts.Last, + h: hdr, + d: data, } - if hdr != nil || data != nil { // If it's not an empty data frame. - // Add some data to grpc message header so that we can equally - // distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df.h, df.d = hdr, data - // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return err } @@ -992,7 +1016,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } statusCode = codes.Unknown } if statusCode == codes.Canceled { @@ -1074,7 +1100,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { return } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } } id := f.LastStreamID if id > 0 && id%2 != 1 { @@ -1188,9 +1216,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if t.statsHandler != nil { if isHeader { inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, } t.statsHandler.HandleRPC(s.ctx, inHeader) } else { @@ -1303,7 +1332,9 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } } } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 8b04b039..3be22fee 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -34,12 +34,10 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" @@ -57,9 +55,6 @@ var ( // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by 
peer. ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") - // statusRawProto is a function to get to the raw status proto wrapped in a - // status.Status without a proto.Clone(). - statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) ) // serverConnectionCounter counts the number of connections a server has seen @@ -294,7 +289,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler if err := t.loopy.run(); err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } t.conn.Close() close(t.writerDone) @@ -365,7 +362,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: true, @@ -396,7 +395,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } s.cancel() return true } @@ -459,7 +460,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() @@ -479,7 +482,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } t.Close() return } @@ -502,7 +507,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } } } } @@ -604,6 +611,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) { if !ok { return } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } if size > 0 { if err := s.fc.onData(size); err != nil { t.closeStream(s, true, http2.ErrCodeFlowControl, false) @@ -724,7 +735,9 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- errorf("transport: Got too many pings from the client, closing the connection.") + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -757,7 +770,9 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } return false } } @@ -794,7 +809,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } @@ -813,10 +828,11 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { return ErrHeaderListSizeLimitViolation } if t.stats != nil { - // Note: WireLength is not set in outHeader. - // TODO(mmukhi): Revisit this later, if needed. + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. outHeader := &stats.OutHeader{ - Header: s.header.Copy(), + Header: s.header.Copy(), + Compression: s.sendCompress, } t.stats.HandleRPC(s.Context(), outHeader) } @@ -843,17 +859,17 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { } } else { // Send a trailer only response. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -880,6 +896,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) if t.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. 
+ // No WireLength field is set here. t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) @@ -911,13 +929,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e return ContextErr(s.ctx.Err()) } } - // Add some data to header frame so that we can equally distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] df := &dataFrame{ streamID: s.id, h: hdr, @@ -989,7 +1000,9 @@ func (t *http2Server) keepalive() { select { case <-ageTimer.C: // Close the connection after grace period. - infof("transport: closing server transport due to maximum connection age.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } t.Close() case <-t.done: } @@ -1006,7 +1019,9 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - infof("transport: closing server transport due to idleness.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } t.Close() return } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 8f5f3349..5e1e7a65 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -37,6 +37,8 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -50,7 +52,7 @@ const ( // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. - baseContentType = "application/grpc" + ) var ( @@ -97,6 +99,7 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } + logger = grpclog.Component("transport") ) type parsedHeaderData struct { @@ -182,46 +185,6 @@ func isWhitelistedHeader(hdr string) bool { } } -// contentSubtype returns the content-subtype for the given content-type. The -// given content-type must be a valid content-type that starts with -// "application/grpc". A content-subtype will follow "application/grpc" after a -// "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If contentType is not a valid content-type for gRPC, the boolean -// will be false, otherwise true. If content-type == "application/grpc", -// "application/grpc+", or "application/grpc;", the boolean will be true, -// but no content-subtype will be returned. -// -// contentType is assumed to be lowercase already. 
-func contentSubtype(contentType string) (string, bool) { - if contentType == baseContentType { - return "", true - } - if !strings.HasPrefix(contentType, baseContentType) { - return "", false - } - // guaranteed since != baseContentType and has baseContentType prefix - switch contentType[len(baseContentType)] { - case '+', ';': - // this will return true for "application/grpc+" or "application/grpc;" - // which the previous validContentType function tested to be valid, so we - // just say that no content-subtype is specified in this case - return contentType[len(baseContentType)+1:], true - default: - return "", false - } -} - -// contentSubtype is assumed to be lowercase -func contentType(contentSubtype string) string { - if contentSubtype == "" { - return baseContentType - } - return baseContentType + "+" + contentSubtype -} - func (d *decodeState) status() *status.Status { if d.data.statusGen == nil { // No status-details were provided; generate status using code/msg. @@ -340,7 +303,7 @@ func (d *decodeState) addMetadata(k, v string) { func (d *decodeState) processHeaderField(f hpack.HeaderField) { switch f.Name { case "content-type": - contentSubtype, validContentType := contentSubtype(f.Value) + contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) if !validContentType { d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) return @@ -412,7 +375,9 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + if logger.V(logLevel) { + logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + } return } d.addMetadata(f.Name, v) @@ -449,41 +414,6 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { return } -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if t <= 0 { - return "0n" - } - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. - return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - func decodeTimeout(s string) (time.Duration, error) { size := len(s) if size < 2 { diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go deleted file mode 100644 index 879df80c..00000000 --- a/vendor/google.golang.org/grpc/internal/transport/log.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. - -package transport - -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 - -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} - -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) - } -} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index a30da9eb..b74030a9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -35,11 +35,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) +const logLevel = 2 + type bufferPool struct { pool sync.Pool } @@ -568,17 +571,10 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 } -// TargetInfo contains the information of the target such as network address and metadata. -type TargetInfo struct { - Addr string - Metadata interface{} - Authority string -} - // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index c9f79dc5..00000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package naming - -import ( - "context" - "errors" - "fmt" - "net" - "strconv" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. -func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. -func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. -// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. 
-func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the address resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unnecessary. Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exists until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. 
- ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. -func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index f4c1c8b6..00000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// -// This package is deprecated: please use package resolver instead. -package naming - -// Operation defines the corresponding operations for a name resolution change. -// -// Deprecated: please use package resolver. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -// -// Deprecated: please use package resolver. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -// -// Deprecated: please use package resolver. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -// -// Deprecated: please use package resolver. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 00447894..a58174b6 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,80 +20,31 @@ package grpc import ( "context" - "fmt" "io" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) -// v2PickerWrapper wraps a balancer.Picker while providing the -// balancer.V2Picker API. It requires a pickerWrapper to generate errors -// including the latest connectionError. To be deleted when balancer.Picker is -// updated to the balancer.V2Picker API. -type v2PickerWrapper struct { - picker balancer.Picker - connErr *connErr -} - -func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - sc, done, err := v.picker.Pick(info.Ctx, info) - if err != nil { - if err == balancer.ErrTransientFailure { - return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) - } - return balancer.PickResult{}, err - } - return balancer.PickResult{SubConn: sc, Done: done}, nil -} - // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { mu sync.Mutex done bool blockingCh chan struct{} - picker balancer.V2Picker - - // The latest connection error. TODO: remove when V1 picker is deprecated; - // balancer should be responsible for providing the error. 
- *connErr -} - -type connErr struct { - mu sync.Mutex - err error -} - -func (c *connErr) updateConnectionError(err error) { - c.mu.Lock() - c.err = err - c.mu.Unlock() -} - -func (c *connErr) connectionError() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err + picker balancer.Picker } func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} + return &pickerWrapper{blockingCh: make(chan struct{})} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) -} - -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { pw.mu.Lock() if pw.done { pw.mu.Unlock() @@ -154,8 +105,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() - } else if connectionErr := pw.connectionError(); connectionErr != nil { - errStr = "latest connection error: " + connectionErr.Error() } else { errStr = ctx.Err().Error() } @@ -180,23 +129,22 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { - if !failfast { - lastPickErr = err - continue - } - return nil, nil, status.Error(codes.Unavailable, err.Error()) - } if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. return nil, nil, err } - // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { @@ -210,7 +158,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index c43dac9a..56e33f6c 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -20,13 +20,10 @@ package grpc import ( "errors" + "fmt" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" ) // PickFirstBalancerName is the name of the pick_first balancer. 
@@ -52,30 +49,16 @@ type pickfirstBalancer struct { sc balancer.SubConn } -var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 - -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - b.ResolverError(err) - return - } - b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error -} - -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) -} - func (b *pickfirstBalancer) ResolverError(err error) { switch b.state { case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: // Set a failing picker if we don't have a good picker. b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, - ) + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } } @@ -88,13 +71,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e var err error b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - if grpclog.V(2) { - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, - ) + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) return balancer.ErrBadResolverState } b.state = connectivity.Idle @@ -108,12 +91,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e } func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) } if b.sc != sc { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") } return } @@ -129,15 +112,9 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) case connectivity.TransientFailure: - err := balancer.ErrTransientFailure - // TODO: this can be unconditional after the V1 API is removed, as - // SubConnState will always contain a connection error. 
- if s.ConnectionError != nil { - err = balancer.TransientFailureError(s.ConnectionError) - } b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, - Picker: &picker{err: err}, + Picker: &picker{err: s.ConnectionError}, }) } } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 0a12ad22..900bd6c0 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_reflection_v1alpha/reflection.proto +// source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha @@ -47,7 +47,7 @@ func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } func (*ServerReflectionRequest) ProtoMessage() {} func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{0} + return fileDescriptor_e8cf9f2921ad6c95, []int{0} } func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error { @@ -105,7 +105,8 @@ func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_M func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} -func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} @@ -177,7 +178,7 @@ func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } func (*ExtensionRequest) ProtoMessage() {} func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{1} + return fileDescriptor_e8cf9f2921ad6c95, []int{1} } func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error { @@ -234,7 +235,7 @@ func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionRespon func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } func (*ServerReflectionResponse) ProtoMessage() {} func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{2} + return fileDescriptor_e8cf9f2921ad6c95, []int{2} } func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error { @@ -289,7 +290,8 @@ type ServerReflectionResponse_ErrorResponse struct { ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } -func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { } @@ -360,7 +362,7 @@ func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } func (*FileDescriptorResponse) ProtoMessage() {} func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{3} 
+ return fileDescriptor_e8cf9f2921ad6c95, []int{3} } func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error { @@ -404,7 +406,7 @@ func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } func (*ExtensionNumberResponse) ProtoMessage() {} func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{4} + return fileDescriptor_e8cf9f2921ad6c95, []int{4} } func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error { @@ -453,7 +455,7 @@ func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } func (*ListServiceResponse) ProtoMessage() {} func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{5} + return fileDescriptor_e8cf9f2921ad6c95, []int{5} } func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error { @@ -496,7 +498,7 @@ func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } func (*ServiceResponse) ProtoMessage() {} func (*ServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{6} + return fileDescriptor_e8cf9f2921ad6c95, []int{6} } func (m *ServiceResponse) XXX_Unmarshal(b []byte) error { @@ -538,7 +540,7 @@ func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } func (*ErrorResponse) ProtoMessage() {} func (*ErrorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42a8ac412db3cb03, []int{7} + return fileDescriptor_e8cf9f2921ad6c95, []int{7} } func (m *ErrorResponse) XXX_Unmarshal(b []byte) error { @@ -585,61 +587,63 @@ func init() { } func init() { - proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03) -} - -var fileDescriptor_42a8ac412db3cb03 = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40, - 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75, - 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e, - 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e, - 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7, - 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7, - 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e, - 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94, - 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43, - 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36, - 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d, - 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3, - 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd, - 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde, - 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 
0x1f, 0x67, 0xfe, 0xcd, - 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc, - 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8, - 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f, - 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33, - 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb, - 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58, - 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a, - 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce, - 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e, - 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25, - 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d, - 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8, - 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62, - 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77, - 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97, - 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0, - 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39, - 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2, - 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad, - 0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29, - 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0, - 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73, - 0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d, - 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6, - 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5, - 0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00, + proto.RegisterFile("reflection/grpc_reflection_v1alpha/reflection.proto", fileDescriptor_e8cf9f2921ad6c95) +} + +var fileDescriptor_e8cf9f2921ad6c95 = []byte{ + // 686 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0xad, 0xdb, 0xa4, 0x55, 0x26, 0x69, 0x92, 0x6f, 0xdb, 0xaf, 0x71, 0x41, 0x45, 0x91, 0xa1, + 0x90, 0x22, 0x94, 0xb4, 0xa9, 0x84, 0x84, 0xb8, 0xa5, 0x80, 0x82, 0x54, 0x5a, 0xe4, 0x70, 0x01, + 0x0e, 0x2b, 0x27, 0x99, 0xb8, 0x06, 0xc7, 0x6b, 0x76, 0xdd, 0x40, 0x4e, 0xfc, 0x08, 0x7e, 0x14, + 0x7f, 0x89, 0x23, 0xda, 0xb5, 0x63, 0x3b, 0x6e, 0x4c, 0xd5, 0x53, 0x9c, 0x37, 0x33, 0xfb, 0x66, + 0xf6, 0xbd, 0xb1, 0xe1, 0x94, 0xe3, 0xc4, 0xc5, 0x51, 0xe0, 0x30, 0xaf, 0x63, 0x73, 0x7f, 0x44, + 0x93, 0xff, 0x74, 0x76, 0x62, 0xb9, 0xfe, 0x95, 0xd5, 0x49, 0xa0, 0xb6, 0xcf, 0x59, 0xc0, 0x48, + 0x43, 0x66, 0xb6, 0x53, 0x70, 0x94, 0x69, 0xfc, 0x59, 0x87, 0xc6, 0x00, 0xf9, 0x0c, 
0xb9, 0x19, + 0x07, 0x4d, 0xfc, 0x76, 0x8d, 0x22, 0x20, 0x04, 0x0a, 0x57, 0x4c, 0x04, 0xba, 0xd6, 0xd4, 0x5a, + 0x25, 0x53, 0x3d, 0x93, 0xa7, 0x50, 0x9f, 0x38, 0x2e, 0xd2, 0xe1, 0x9c, 0xca, 0x5f, 0xcf, 0x9a, + 0xa2, 0xbe, 0x21, 0xe3, 0xfd, 0x35, 0xb3, 0x2a, 0x91, 0xde, 0xfc, 0x4d, 0x84, 0x93, 0xe7, 0xb0, + 0xa7, 0x72, 0x47, 0xcc, 0x0b, 0x2c, 0xc7, 0x73, 0x3c, 0x9b, 0x8a, 0xf9, 0x74, 0xc8, 0x5c, 0xbd, + 0x10, 0x55, 0xec, 0xca, 0xf8, 0x59, 0x1c, 0x1e, 0xa8, 0x28, 0xb1, 0x61, 0x3f, 0x5b, 0x87, 0x3f, + 0x02, 0xf4, 0x84, 0xc3, 0x3c, 0xbd, 0xd8, 0xd4, 0x5a, 0xe5, 0xee, 0x51, 0x3b, 0x67, 0xa0, 0xf6, + 0xeb, 0x45, 0x66, 0x34, 0x45, 0x7f, 0xcd, 0x6c, 0x2c, 0xb3, 0xc4, 0x19, 0xa4, 0x07, 0x07, 0x96, + 0xeb, 0x26, 0x87, 0x53, 0xef, 0x7a, 0x3a, 0x44, 0x2e, 0x28, 0x9b, 0xd0, 0x60, 0xee, 0xa3, 0xbe, + 0x19, 0xf5, 0xb9, 0x6f, 0xb9, 0x6e, 0x5c, 0x76, 0x11, 0x26, 0x5d, 0x4e, 0x3e, 0xcc, 0x7d, 0x24, + 0x87, 0xb0, 0xed, 0x3a, 0x22, 0xa0, 0x02, 0xf9, 0xcc, 0x19, 0xa1, 0xd0, 0xb7, 0xa2, 0x9a, 0x8a, + 0x84, 0x07, 0x11, 0xda, 0xfb, 0x0f, 0x6a, 0x53, 0x14, 0xc2, 0xb2, 0x91, 0xf2, 0xb0, 0x31, 0x63, + 0x02, 0xf5, 0x6c, 0xb3, 0xe4, 0x09, 0xd4, 0x52, 0x53, 0xab, 0x1e, 0xc2, 0xdb, 0xaf, 0x26, 0xb0, + 0xa2, 0x3d, 0x82, 0x7a, 0xb6, 0x6d, 0x7d, 0xbd, 0xa9, 0xb5, 0x8a, 0x66, 0x0d, 0x97, 0x1b, 0x35, + 0x7e, 0x17, 0x40, 0xbf, 0x29, 0xb1, 0xf0, 0x99, 0x27, 0x90, 0x1c, 0x00, 0xcc, 0x2c, 0xd7, 0x19, + 0xd3, 0x94, 0xd2, 0x25, 0x85, 0xf4, 0xa5, 0xdc, 0x9f, 0xa1, 0xce, 0xb8, 0x63, 0x3b, 0x9e, 0xe5, + 0x2e, 0xfa, 0x56, 0x34, 0xe5, 0xee, 0x71, 0xae, 0x02, 0x39, 0x76, 0x32, 0x6b, 0x8b, 0x93, 0x16, + 0xc3, 0x7e, 0x05, 0x5d, 0xe9, 0x3c, 0x46, 0x31, 0xe2, 0x8e, 0x1f, 0x30, 0x4e, 0x79, 0xd4, 0x97, + 0x72, 0x48, 0xb9, 0xdb, 0xc9, 0x25, 0x91, 0x26, 0x7b, 0x15, 0xd7, 0x2d, 0xc6, 0xe9, 0xaf, 0x99, + 0xca, 0x72, 0x37, 0x23, 0xe4, 0x3b, 0x3c, 0x58, 0xad, 0x75, 0x4c, 0x59, 0xbc, 0x65, 0xae, 0x8c, + 0x01, 0x52, 0x9c, 0xf7, 0x57, 0xd8, 0x23, 0x26, 0x1e, 0xc3, 0xde, 0x92, 0x41, 0x12, 0xc2, 0x4d, + 0x45, 0xf8, 0x2c, 0x97, 0xf0, 0x3c, 0x31, 0x50, 0x8a, 0x6c, 0x37, 0xed, 0xab, 0x98, 0xe5, 0x12, + 0xaa, 0xc8, 0x79, 0xfa, 0x06, 0xb7, 0xd4, 0xe9, 0x8f, 0xf3, 0xc7, 0x91, 0xe9, 0xa9, 0x73, 0xb7, + 0x31, 0x0d, 0xf4, 0x08, 0xd4, 0x13, 0xc3, 0x86, 0x98, 0x71, 0x0e, 0x7b, 0xab, 0xef, 0x9d, 0x74, + 0xe1, 0xff, 0xac, 0x94, 0xea, 0xc5, 0xa3, 0x6b, 0xcd, 0x8d, 0x56, 0xc5, 0xdc, 0x59, 0x16, 0xe5, + 0xbd, 0x0c, 0x19, 0x5f, 0xa0, 0x91, 0x73, 0xa5, 0xe4, 0x11, 0x54, 0x87, 0x96, 0x40, 0xb5, 0x00, + 0x54, 0xbd, 0x63, 0x42, 0x67, 0x56, 0x24, 0x2a, 0xfd, 0x7f, 0x21, 0xdf, 0x2f, 0xab, 0x77, 0x60, + 0x63, 0xd5, 0x0e, 0x7c, 0x84, 0x9d, 0x15, 0xb7, 0x49, 0x7a, 0xb0, 0x15, 0xc9, 0xa2, 0x1a, 0x2d, + 0x77, 0x5b, 0xff, 0x74, 0x75, 0xaa, 0xd4, 0x5c, 0x14, 0x1a, 0x87, 0x50, 0xcb, 0x1e, 0x4b, 0xa0, + 0x90, 0x6a, 0x5a, 0x3d, 0x1b, 0x03, 0xd8, 0x5e, 0xba, 0x71, 0xb9, 0x79, 0xa1, 0x62, 0x23, 0x36, + 0x0e, 0x53, 0x8b, 0x66, 0x49, 0x21, 0x67, 0x6c, 0x8c, 0xe4, 0x21, 0x84, 0x82, 0xd0, 0x48, 0x05, + 0xb5, 0x76, 0x25, 0xb3, 0xa2, 0xc0, 0x77, 0x21, 0xd6, 0xfd, 0xa5, 0x41, 0x3d, 0xbb, 0x6e, 0xe4, + 0x27, 0xec, 0x66, 0xb1, 0xb7, 0xde, 0x84, 0x91, 0x3b, 0x6f, 0xec, 0xbd, 0x93, 0x3b, 0x54, 0x84, + 0x53, 0xb5, 0xb4, 0x63, 0xad, 0xf7, 0xf2, 0xd3, 0x0b, 0x9b, 0x31, 0xdb, 0xc5, 0xb6, 0xcd, 0x5c, + 0xcb, 0xb3, 0xdb, 0x8c, 0xdb, 0xea, 0x53, 0xd5, 0xb9, 0xfd, 0xd3, 0x35, 0xdc, 0x54, 0xbe, 0x39, + 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x74, 0x3a, 0x67, 0xe7, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // ServerReflectionClient is the client API for ServerReflection service. // @@ -651,10 +655,10 @@ type ServerReflectionClient interface { } type serverReflectionClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient { +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { return &serverReflectionClient{cc} } @@ -746,5 +750,5 @@ var _ServerReflection_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "grpc_reflection_v1alpha/reflection.proto", + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto index 99b00df0..ee2b82c0 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -16,6 +16,8 @@ syntax = "proto3"; +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; + package grpc.reflection.v1alpha; service ServerReflection { diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 00000000..95f9d0cb --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,75 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_reflection_v1alpha + +import ( + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// ServerReflectionService is the service API for ServerReflection service. +// Fields should be assigned to their respective handler implementations only before +// RegisterServerReflectionService is called. Any unassigned fields will result in the +// handler for that method returning an Unimplemented error. +type ServerReflectionService struct { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo func(ServerReflection_ServerReflectionInfoServer) error +} + +func (s *ServerReflectionService) serverReflectionInfo(_ interface{}, stream grpc.ServerStream) error { + if s.ServerReflectionInfo == nil { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") + } + return s.ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +// RegisterServerReflectionService registers a service implementation with a gRPC server. 
+func RegisterServerReflectionService(s grpc.ServiceRegistrar, srv *ServerReflectionService) { + sd := grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: srv.serverReflectionInfo, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", + } + + s.RegisterService(&sd, nil) +} + +// NewServerReflectionService creates a new ServerReflectionService containing the +// implemented methods of the ServerReflection service in s. Any unimplemented +// methods will result in the gRPC server returning an UNIMPLEMENTED status to the client. +// This includes situations where the method handler is misspelled or has the wrong +// signature. For this reason, this function should be used with great care and +// is not recommended to be used by most users. +func NewServerReflectionService(s interface{}) *ServerReflectionService { + ns := &ServerReflectionService{} + if h, ok := s.(interface { + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error + }); ok { + ns.ServerReflectionInfo = h.ServerReflectionInfo + } + return ns +} + +// UnstableServerReflectionService is the service API for ServerReflection service. +// New methods may be added to this interface if they are added to the service +// definition, which is not a backward-compatible change. For this reason, +// use of this type is not recommended. +type UnstableServerReflectionService interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index dd22a2da..7b6dd414 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto - /* Package reflection implements server reflection service. @@ -57,6 +55,7 @@ import ( ) type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer s *grpc.Server initSymbols sync.Once diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 00000000..750e3500 --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "go install github.com/golang/protobuf/protoc-gen-go" +(cd test/tools && go install github.com/golang/protobuf/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) + +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +# Pull in the following repos to build the MeshCA config proto. +ENVOY_API_REPOS=( + "https://github.com/envoyproxy/data-plane-api" + "https://github.com/cncf/udpa" + "https://github.com/envoyproxy/protoc-gen-validate" +) +for repo in ${ENVOY_API_REPOS[@]}; do + dirname=$(basename ${repo}) + mkdir -p ${WORKDIR}/${dirname} + echo "git clone ${repo}" + git clone --quiet ${repo} ${WORKDIR}/${dirname} +done + +# Pull in the MeshCA service proto. +mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 +echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" +curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto + +mkdir -p ${WORKDIR}/out + +# Generates legacy gRPC Server symbols in addition to the newer Service symbols +LEGACY_SOURCES=( + ${WORKDIR}/googleapis/google/rpc/code.proto + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + ${WORKDIR}/grpc-proto/grpc/tls/provider/meshca/experimental/config.proto + ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto + profiling/proto/service.proto + reflection/grpc_reflection_v1alpha/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Menvoy/config/core/v3/config_source.proto=github.com/envoyproxy/go-control-plane/envoy/config/core/v3 + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS},plugins=grpc:${WORKDIR}/out --go-grpc_out=${OPTS},migration_mode=true:${WORKDIR}/out \ + -I"." 
\ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/data-plane-api \ + -I${WORKDIR}/udpa \ + -I${WORKDIR}/protoc-gen-validate \ + -I${WORKDIR}/istio \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 + +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ +mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index fe14b2fb..379275a2 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -85,7 +85,10 @@ const ( Backend AddressType = iota // GRPCLB indicates the address is for a grpclb load balancer. // - // Deprecated: use Attributes in Address instead. + // Deprecated: to select the GRPCLB load balancing policy, use a service + // config with a corresponding loadBalancingConfig. To supply balancer + // addresses to the GRPCLB load balancing policy, set State.Attributes + // using balancer/grpclb/state.Set. GRPCLB ) diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 3eaf724c..265002a7 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" @@ -46,34 +45,6 @@ type ccResolverWrapper struct { polling chan struct{} } -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. 
-func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} - // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { @@ -169,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } @@ -181,13 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } - grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver reported error: %v", err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } @@ -196,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } @@ -210,20 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { - grpclog.Infof("Service config lookups disabled; ignoring config") + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } @@ -256,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index d3a4adc5..d4870ba4 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ 
b/vendor/google.golang.org/grpc/rpc_util.go @@ -155,7 +155,6 @@ func (d *gzipDecompressor) Type() string { type callInfo struct { compressorType string failFast bool - stream ClientStream maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials @@ -180,7 +179,7 @@ type CallOption interface { // after is called after the call has completed. after cannot return an // error, so any failures should be reported via output parameters. - after(*callInfo) + after(*callInfo, *csAttempt) } // EmptyCallOption does not alter the Call configuration. @@ -188,8 +187,8 @@ type CallOption interface { // by interceptors. type EmptyCallOption struct{} -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. @@ -205,10 +204,8 @@ type HeaderCallOption struct { } func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo) { - if c.stream != nil { - *o.HeaderAddr, _ = c.stream.Header() - } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -225,10 +222,8 @@ type TrailerCallOption struct { } func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo) { - if c.stream != nil { - *o.TrailerAddr = c.stream.Trailer() - } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. @@ -245,11 +240,9 @@ type PeerCallOption struct { } func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo) { - if c.stream != nil { - if x, ok := peer.FromContext(c.stream.Context()); ok { - *o.PeerAddr = *x - } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x } } @@ -285,15 +278,16 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo) {} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. +// size in bytes the client can receive. // This is an EXPERIMENTAL API. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int @@ -303,15 +297,16 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. 
-func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. +// size in bytes the client can send. // This is an EXPERIMENTAL API. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int @@ -321,7 +316,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -340,7 +335,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo) {} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -361,7 +356,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo) {} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -394,7 +389,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo) {} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // ForceCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling @@ -426,7 +421,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o ForceCodecCallOption) after(c *callInfo) {} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -448,7 +443,7 @@ func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o CustomCodecCallOption) after(c *callInfo) {} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -469,7 +464,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -871,7 +866,7 @@ type channelzData struct { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 6. +// support package version is 7. // // Older versions are kept for compatibility. 
They may be removed if // compatibility cannot be maintained. @@ -882,6 +877,7 @@ const ( SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0d75cb10..33fc3240 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -58,6 +59,7 @@ const ( ) var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) @@ -78,27 +80,34 @@ type ServiceDesc struct { Metadata interface{} } -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream } // Server is a gRPC server to serve RPC requests. type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool - serve bool - drain bool - cv *sync.Cond // signaled when connections close for GracefulStop - m map[string]*service // service name -> service info - events trace.EventLog + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog quit *grpcsync.Event done *grpcsync.Event @@ -107,6 +116,8 @@ type Server struct { channelzID int64 // channelz unique identification number czData *channelzData + + serverWorkerChannels []chan *serverWorkerData } type serverOptions struct { @@ -116,6 +127,8 @@ type serverOptions struct { dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle statsHandler stats.Handler maxConcurrentStreams uint32 @@ -131,6 +144,7 @@ type serverOptions struct { connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 + numServerWorkers uint32 } var defaultServerOptions = serverOptions{ @@ -211,7 +225,7 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. 
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") kp.Time = time.Second } @@ -230,6 +244,12 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.codec = codec @@ -242,7 +262,8 @@ func CustomCodec(codec Codec) ServerOption { // default, server messages will be sent using the same compressor with which // request messages were sent. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCCompressor(cp Compressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.cp = cp @@ -253,7 +274,8 @@ func RPCCompressor(cp Compressor) ServerOption { // messages. It has higher priority than decompressors registered via // encoding.RegisterCompressor. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCDecompressor(dc Decompressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.dc = dc @@ -263,7 +285,7 @@ func RPCDecompressor(dc Decompressor) ServerOption { // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default limit. // -// Deprecated: use MaxRecvMsgSize instead. +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. func MaxMsgSize(m int) ServerOption { return MaxRecvMsgSize(m) } @@ -311,6 +333,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { }) } +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { @@ -322,6 +354,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { }) } +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. 
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. func InTapHandle(h tap.ServerInHandle) ServerOption { @@ -388,6 +430,66 @@ func HeaderTableSize(s uint32) ServerOption { }) } +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// This API is EXPERIMENTAL. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { @@ -396,20 +498,26 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[transport.ServerTransport]bool), - m: make(map[string]*service), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - czData: new(channelzData), - } + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + if channelz.IsOn() { s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") } @@ -432,14 +540,29 @@ func (s *Server) errorf(format string, a ...interface{}) { } } +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before -// invoking Serve. +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. 
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } } s.register(sd, ss) } @@ -449,26 +572,26 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) } - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] - srv.md[d.MethodName] = d + info.methods[d.MethodName] = d } for i := range sd.Streams { d := &sd.Streams[i] - srv.sd[d.StreamName] = d + info.streams[d.StreamName] = d } - s.m[sd.ServiceName] = srv + s.services[sd.ServiceName] = info } // MethodInfo contains the information of an RPC including its method name and type. @@ -492,16 +615,16 @@ type ServiceInfo struct { // Service names include the package names, in the form of .. 
func (s *Server) GetServiceInfo() map[string]ServiceInfo { ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { methods = append(methods, MethodInfo{ Name: m, IsClientStream: false, IsServerStream: false, }) } - for m, d := range srv.sd { + for m, d := range srv.streams { methods = append(methods, MethodInfo{ Name: m, IsClientStream: d.ClientStreams, @@ -658,7 +781,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -705,7 +828,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -715,12 +838,27 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close() var wg sync.WaitGroup + + var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -844,12 +982,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -864,7 +1002,41 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return err } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { +// chainUnaryServerInterceptors chains all unary server interceptors into one. 
+func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { sh := s.opts.statsHandler if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -987,10 +1159,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } return err } @@ -1021,7 +1191,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return nil } ctx := NewContextWithServerTransportStream(stream.Context(), stream) - reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { @@ -1034,7 +1204,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1061,9 +1231,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // The entire stream is done (for unary RPC only). 
return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1113,7 +1283,41 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1230,8 +1434,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } var appErr error var server interface{} - if srv != nil { - server = srv.server + if info != nil { + server = info.serviceImpl } if s.opts.streamInt == nil { appErr = sd.Handler(server, ss) @@ -1297,7 +1501,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1307,13 +1511,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - srv, knownService := s.m[service] + srv, knownService := s.services[service] if knownService { - if md, ok := srv.md[method]; ok { + if md, ok := srv.methods[method]; ok { s.processUnaryRPC(t, stream, srv, md, trInfo) return } - if sd, ok := srv.sd[method]; ok { + if sd, ok := srv.streams[method]; ok { s.processStreamingRPC(t, stream, srv, sd, trInfo) return } @@ -1338,7 +1542,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str 
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1415,6 +1619,9 @@ func (s *Server) Stop() { for c := range st { c.Close() } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } s.mu.Lock() if s.events != nil { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 5a80a575..5e434ca7 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -20,15 +20,16 @@ package grpc import ( "encoding/json" + "errors" "fmt" + "reflect" "strconv" "strings" "time" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -79,7 +80,7 @@ type ServiceConfig struct { serviceconfig.Config // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancer will override this. This is deprecated; + // specified via grpc.WithBalancerName will override this. This is deprecated; // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig // will be used. LB *string @@ -224,19 +225,27 @@ func parseDuration(s *string) (*time.Duration, error) { } type jsonName struct { - Service *string - Method *string + Service string + Method string } -func (j jsonName) generatePath() (string, bool) { - if j.Service == nil { - return "", false +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil } - res := "/" + *j.Service + "/" - if j.Method != nil { - res += *j.Method + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method } - return res, true + return res, nil } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. @@ -249,12 +258,10 @@ type jsonMC struct { RetryPolicy *jsonRetryPolicy } -type loadBalancingConfig map[string]json.RawMessage - // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *[]loadBalancingConfig + LoadBalancingConfig *internalserviceconfig.BalancerConfig MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig @@ -270,7 +277,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -280,53 +287,25 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if rsc.LoadBalancingConfig != nil { - for i, lbcfg := range *rsc.LoadBalancingConfig { - if len(lbcfg) != 1 { - err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) - grpclog.Warningf(err.Error()) - return &serviceconfig.ParseResult{Err: err} - } - var name string - var jsonCfg json.RawMessage - for name, jsonCfg = range lbcfg { - } - builder := balancer.Get(name) - if builder == nil { - continue - } - sc.lbConfig = &lbConfig{name: name} - if parser, ok := builder.(balancer.ConfigParser); ok { - var err error - sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) - if err != nil { - return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} - } - } else if string(jsonCfg) != "{}" { - grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) - } - break - } - if sc.lbConfig == nil { - // We had a loadBalancingConfig field but did not encounter a - // supported policy. The config is considered invalid in this - // case. 
- err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") - grpclog.Warningf(err.Error()) - return &serviceconfig.ParseResult{Err: err} + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, } } if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} } + + paths := map[string]struct{}{} for _, m := range *rsc.MethodConfig { if m.Name == nil { continue } d, err := parseDuration(m.Timeout) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -335,7 +314,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -352,10 +331,20 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) } } - for _, n := range *m.Name { - if path, valid := n.generatePath(); valid { - sc.Methods[path] = mc + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} } + paths[path] = struct{}{} + sc.Methods[path] = mc } } @@ -388,7 +377,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { *mb <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } @@ -432,3 +421,34 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { func newInt(b int) *int { return &b } + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 9e22c393..63e476ee 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. 
grpc_testing/test.proto - // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -81,6 +79,10 @@ type InHeader struct { Client bool // WireLength is the wire length of header. WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -89,10 +91,6 @@ type InHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata received. - Header metadata.MD } // IsClient indicates if the stats information is from client side. @@ -141,6 +139,10 @@ func (s *OutPayload) isRPCStats() {} type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -149,10 +151,6 @@ type OutHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata sent. - Header metadata.MD } // IsClient indicates if this stats information is from client side. @@ -165,6 +163,9 @@ type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index a1348e9b..01e182c3 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,88 +29,23 @@ package status import ( "context" - "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/status" ) -func init() { - internal.StatusRawProto = statusRawProto -} - -func statusRawProto(s *Status) *spb.Status { return s.s } - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. -type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Is implements future error.Is functionality. -// A statusError is equivalent if the code and message are identical. 
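In the stats.go hunk above, `Compression` and `Header` move out of the client-only/server-only blocks of `InHeader` and `OutHeader`, so they are documented as valid on both sides. A minimal, hypothetical `stats.Handler` that reads them (attach it with `grpc.WithStatsHandler` on a client or `grpc.StatsHandler` on a server):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// logHandler logs the header metadata and compression algorithm for every RPC.
type logHandler struct{}

func (logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (logHandler) HandleConn(context.Context, stats.ConnStats)                       {}

func (logHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch h := s.(type) {
	case *stats.InHeader:
		log.Printf("in header: compression=%q md=%v", h.Compression, h.Header)
	case *stats.OutHeader:
		log.Printf("out header: compression=%q md=%v", h.Compression, h.Header)
	}
}

func main() {
	var _ stats.Handler = logHandler{} // compile-time interface check
}
```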
-func (se *statusError) Is(target error) bool { - tse, ok := target.(*statusError) - if !ok { - return false - } - - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). @@ -135,7 +70,7 @@ func ErrorProto(s *spb.Status) error { // FromProto returns a Status representing s. func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} + return status.FromProto(s) } // FromError returns a Status representing err if it was produced from this @@ -160,42 +95,6 @@ func Convert(err error) *Status { return s } -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - // Code returns the Code of the error if it is a Status error, codes.OK if err // is nil, or codes.Unknown otherwise. 
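With the change above, `Status` becomes a type alias for the implementation in `google.golang.org/grpc/internal/status`, so the exported API (`New`, `FromProto`, `FromError`, `Convert`, `Code`, plus `WithDetails`/`Details` on the type) behaves as before. A small sketch of the usual round trip:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Construct an error carrying a gRPC status, then recover it on the other end.
	err := status.Error(codes.NotFound, "user 42 not found")

	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound user 42 not found
	}

	// Code is a convenience that tolerates nil and non-status errors.
	fmt.Println(status.Code(nil))                 // OK
	fmt.Println(status.Code(errors.New("plain"))) // Unknown
}
```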
func Code(err error) codes.Code { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb99940e..fbc3fb11 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,11 +31,11 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -278,7 +278,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } cs.binlog = binarylog.GetMethodLogger(method) - cs.callInfo.stream = cs // Only this initial attempt has stats/tracing. // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. if err := cs.newAttemptLocked(sh, trInfo); err != nil { @@ -348,7 +347,16 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r if err := cs.ctx.Err(); err != nil { return toRPCErr(err) } - t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + + ctx := cs.ctx + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { return err } @@ -366,6 +374,11 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } return toRPCErr(err) } cs.attempt.s = s @@ -461,11 +474,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. @@ -474,13 +497,12 @@ func (cs *clientStream) shouldRetry(err error) error { // Wait for the trailers. if cs.attempt.s != nil { <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() } - if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. 
- cs.firstAttempt = false return nil } - cs.firstAttempt = false if cs.cc.dopts.disableRetry { return err } @@ -498,13 +520,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } @@ -566,6 +588,7 @@ func (cs *clientStream) retryLocked(lastErr error) error { cs.commitAttemptLocked() return err } + cs.firstAttempt = false if err := cs.newAttemptLocked(nil, nil); err != nil { return err } @@ -800,6 +823,15 @@ func (cs *clientStream) finish(err error) { } cs.finished = true cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } cs.mu.Unlock() // For binary logging. only log cancel in finish (could be caused by RPC ctx // canceled or ClientConn closed). Trailer will be logged in RecvMsg. @@ -821,15 +853,6 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - if cs.attempt != nil { - cs.attempt.finish(err) - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } - } - } cs.cancel() } @@ -1067,7 +1090,6 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin t: t, } - as.callInfo.stream = as s, err := as.t.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go deleted file mode 100644 index 168cdb85..00000000 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ /dev/null @@ -1,308 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bufconn provides a net.Conn implemented by a buffer and related -// dialing and listening functionality. -package bufconn - -import ( - "fmt" - "io" - "net" - "sync" - "time" -) - -// Listener implements a net.Listener that creates local, buffered net.Conns -// via its Accept and Dial method. 
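The deletion that begins above removes the vendored copy of `test/bufconn`, gRPC's in-memory `net.Listener` used in tests, which is no longer needed in this module's vendor tree. For reference, a typical (hypothetical) wiring of the package when it is available on the module path:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/test/bufconn"
)

func main() {
	// An in-memory listener: the server accepts from it, clients dial it
	// through WithContextDialer, and no TCP port is ever opened.
	lis := bufconn.Listen(1024 * 1024)

	srv := grpc.NewServer()
	go func() {
		if err := srv.Serve(lis); err != nil {
			log.Printf("server stopped: %v", err)
		}
	}()
	defer srv.Stop()

	conn, err := grpc.DialContext(context.Background(), "bufnet",
		grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) {
			return lis.Dial()
		}),
		grpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```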
-type Listener struct { - mu sync.Mutex - sz int - ch chan net.Conn - done chan struct{} -} - -// Implementation of net.Error providing timeout -type netErrorTimeout struct { - error -} - -func (e netErrorTimeout) Timeout() bool { return true } -func (e netErrorTimeout) Temporary() bool { return false } - -var errClosed = fmt.Errorf("closed") -var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} - -// Listen returns a Listener that can only be contacted by its own Dialers and -// creates buffered connections between the two. -func Listen(sz int) *Listener { - return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} -} - -// Accept blocks until Dial is called, then returns a net.Conn for the server -// half of the connection. -func (l *Listener) Accept() (net.Conn, error) { - select { - case <-l.done: - return nil, errClosed - case c := <-l.ch: - return c, nil - } -} - -// Close stops the listener. -func (l *Listener) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.done: - // Already closed. - break - default: - close(l.done) - } - return nil -} - -// Addr reports the address of the listener. -func (l *Listener) Addr() net.Addr { return addr{} } - -// Dial creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. -func (l *Listener) Dial() (net.Conn, error) { - p1, p2 := newPipe(l.sz), newPipe(l.sz) - select { - case <-l.done: - return nil, errClosed - case l.ch <- &conn{p1, p2}: - return &conn{p2, p1}, nil - } -} - -type pipe struct { - mu sync.Mutex - - // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. - // - // Data is read between [r, w) and written to [w, r), wrapping around the end - // of the slice if necessary. - // - // The buffer is empty if r == len(buf), otherwise if r == w, it is full. - // - // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. - buf []byte - w, r int - - wwait sync.Cond - rwait sync.Cond - - // Indicate that a write/read timeout has occurred - wtimedout bool - rtimedout bool - - wtimer *time.Timer - rtimer *time.Timer - - closed bool - writeClosed bool -} - -func newPipe(sz int) *pipe { - p := &pipe{buf: make([]byte, 0, sz)} - p.wwait.L = &p.mu - p.rwait.L = &p.mu - - p.wtimer = time.AfterFunc(0, func() {}) - p.rtimer = time.AfterFunc(0, func() {}) - return p -} - -func (p *pipe) empty() bool { - return p.r == len(p.buf) -} - -func (p *pipe) full() bool { - return p.r < len(p.buf) && p.r == p.w -} - -func (p *pipe) Read(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - // Block until p has data. - for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.empty() { - break - } - if p.writeClosed { - return 0, io.EOF - } - if p.rtimedout { - return 0, errTimeout - } - - p.rwait.Wait() - } - wasFull := p.full() - - n = copy(b, p.buf[p.r:len(p.buf)]) - p.r += n - if p.r == cap(p.buf) { - p.r = 0 - p.buf = p.buf[:p.w] - } - - // Signal a blocked writer, if any - if wasFull { - p.wwait.Signal() - } - - return n, nil -} - -func (p *pipe) Write(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.closed { - return 0, io.ErrClosedPipe - } - for len(b) > 0 { - // Block until p is not full. 
- for { - if p.closed || p.writeClosed { - return 0, io.ErrClosedPipe - } - if !p.full() { - break - } - if p.wtimedout { - return 0, errTimeout - } - - p.wwait.Wait() - } - wasEmpty := p.empty() - - end := cap(p.buf) - if p.w < p.r { - end = p.r - } - x := copy(p.buf[p.w:end], b) - b = b[x:] - n += x - p.w += x - if p.w > len(p.buf) { - p.buf = p.buf[:p.w] - } - if p.w == cap(p.buf) { - p.w = 0 - } - - // Signal a blocked reader, if any. - if wasEmpty { - p.rwait.Signal() - } - } - return n, nil -} - -func (p *pipe) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - p.closed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -func (p *pipe) closeWrite() error { - p.mu.Lock() - defer p.mu.Unlock() - p.writeClosed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -type conn struct { - io.Reader - io.Writer -} - -func (c *conn) Close() error { - err1 := c.Reader.(*pipe).Close() - err2 := c.Writer.(*pipe).closeWrite() - if err1 != nil { - return err1 - } - return err2 -} - -func (c *conn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -func (c *conn) SetReadDeadline(t time.Time) error { - p := c.Reader.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.rtimer.Stop() - p.rtimedout = false - if !t.IsZero() { - p.rtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.rtimedout = true - p.rwait.Broadcast() - }) - } - return nil -} - -func (c *conn) SetWriteDeadline(t time.Time) error { - p := c.Writer.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.wtimer.Stop() - p.wtimedout = false - if !t.IsZero() { - p.wtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.wtimedout = true - p.wwait.Broadcast() - }) - } - return nil -} - -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } - -type addr struct{} - -func (addr) Network() string { return "bufconn" } -func (addr) String() string { return "bufconn" } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 1a831b15..89d37ce8 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.27.1" +const Version = "1.32.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 0e737072..c52c1bdb 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -1,20 +1,22 @@ #!/bin/bash -if [[ `uname -a` = *"Darwin"* ]]; then - echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" - exit 1 -fi - set -ex # Exit on error; debugging enabled. set -o pipefail # Fail a pipe if any sub-command fails. +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + die() { echo "$@" >&2 exit 1 } fail_on_output() { - tee /dev/stderr | (! read) + tee /dev/stderr | not read } # Check to make sure it's safe to modify the user's git repo. 
@@ -37,8 +39,7 @@ if [[ "$1" = "-install" ]]; then golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go + github.com/client9/misspell/cmd/misspell popd else # Ye olde `go get` incantation. @@ -48,8 +49,7 @@ if [[ "$1" = "-install" ]]; then golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go + github.com/client9/misspell/cmd/misspell fi if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then @@ -60,7 +60,7 @@ if [[ "$1" = "-install" ]]; then unzip ${PROTOC_FILENAME} bin/protoc --version popd - elif ! which protoc > /dev/null; then + elif not which protoc > /dev/null; then die "Please install protoc into your path" fi fi @@ -70,31 +70,33 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go') +not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -(! grep 'func Test[^(]' *_test.go) -(! grep 'func Test[^(]' test/*.go) +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go # - Do not import x/net/context. -(! git grep -l 'x/net/context' -- "*.go") +not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test') +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not call grpclog directly. Use grpclog.Component instead. +git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. -(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go") +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") -golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go vet -all . +goimports -l . 2>&1 | not grep -vE "\.pb\.go" +golint ./... 2>&1 | not grep -vE "\.pb\.go:" +go vet -all ./... misspell -error . @@ -105,10 +107,10 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our module is tidy. +# - Check that our modules are tidy. if go help mod >& /dev/null; then - go mod tidy && \ - git status --porcelain 2>&1 | fail_on_output || \ + find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' + git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi @@ -119,20 +121,20 @@ fi SC_OUT="$(mktemp)" staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... 
> "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. -(! grep -v "is deprecated:.*SA1019" "${SC_OUT}") +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. -(! grep -Fv '.HandleResolvedAddrs -.HandleSubConnStateChange +not grep -Fv '.CredsBundle .HeaderMap +.Metadata is deprecated: use Attributes .NewAddress .NewServiceConfig -.Metadata is deprecated: use Attributes .Type is deprecated: use Attributes -.UpdateBalancerState +balancer.ErrTransientFailure balancer.Picker grpc.CallCustomCodec grpc.Code grpc.Compressor +grpc.CustomCodec grpc.Decompressor grpc.MaxMsgSize grpc.MethodConfig @@ -140,9 +142,7 @@ grpc.NewGZIPCompressor grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor -grpc.RoundRobin grpc.ServiceConfig -grpc.WithBalancer grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor @@ -151,9 +151,39 @@ grpc.WithMaxMsgSize grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier -naming.Resolver -naming.Update -naming.Watcher +info.SecurityVersion resolver.Backend resolver.GRPCLB' "${SC_OUT}" -) + +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. + lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. + if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment + +echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS new file mode 100644 index 00000000..2b00ddba --- /dev/null +++ b/vendor/google.golang.org/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS new file mode 100644 index 00000000..1fbd3e97 --- /dev/null +++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE new file mode 100644 index 00000000..49ea0f92 --- /dev/null +++ b/vendor/google.golang.org/protobuf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/google.golang.org/protobuf/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go new file mode 100644 index 00000000..369df13d --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal_gengo + +import ( + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/encoding/protowire" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type fileInfo struct { + *protogen.File + + allEnums []*enumInfo + allMessages []*messageInfo + allExtensions []*extensionInfo + + allEnumsByPtr map[*enumInfo]int // value is index into allEnums + allMessagesByPtr map[*messageInfo]int // value is index into allMessages + allMessageFieldsByPtr map[*messageInfo]*structFields + + // needRawDesc specifies whether the generator should emit logic to provide + // the legacy raw descriptor in GZIP'd form. + // This is updated by enum and message generation logic as necessary, + // and checked at the end of file generation. + needRawDesc bool +} + +type structFields struct { + count int + unexported map[int]string +} + +func (sf *structFields) append(name string) { + if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) { + if sf.unexported == nil { + sf.unexported = make(map[int]string) + } + sf.unexported[sf.count] = name + } + sf.count++ +} + +func newFileInfo(file *protogen.File) *fileInfo { + f := &fileInfo{File: file} + + // Collect all enums, messages, and extensions in "flattened ordering". + // See filetype.TypeBuilder. + var walkMessages func([]*protogen.Message, func(*protogen.Message)) + walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) { + for _, m := range messages { + f(m) + walkMessages(m.Messages, f) + } + } + initEnumInfos := func(enums []*protogen.Enum) { + for _, enum := range enums { + f.allEnums = append(f.allEnums, newEnumInfo(f, enum)) + } + } + initMessageInfos := func(messages []*protogen.Message) { + for _, message := range messages { + f.allMessages = append(f.allMessages, newMessageInfo(f, message)) + } + } + initExtensionInfos := func(extensions []*protogen.Extension) { + for _, extension := range extensions { + f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension)) + } + } + initEnumInfos(f.Enums) + initMessageInfos(f.Messages) + initExtensionInfos(f.Extensions) + walkMessages(f.Messages, func(m *protogen.Message) { + initEnumInfos(m.Enums) + initMessageInfos(m.Messages) + initExtensionInfos(m.Extensions) + }) + + // Derive a reverse mapping of enum and message pointers to their index + // in allEnums and allMessages. 
+ if len(f.allEnums) > 0 { + f.allEnumsByPtr = make(map[*enumInfo]int) + for i, e := range f.allEnums { + f.allEnumsByPtr[e] = i + } + } + if len(f.allMessages) > 0 { + f.allMessagesByPtr = make(map[*messageInfo]int) + f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields) + for i, m := range f.allMessages { + f.allMessagesByPtr[m] = i + f.allMessageFieldsByPtr[m] = new(structFields) + } + } + + return f +} + +type enumInfo struct { + *protogen.Enum + + genJSONMethod bool + genRawDescMethod bool +} + +func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo { + e := &enumInfo{Enum: enum} + e.genJSONMethod = true + e.genRawDescMethod = true + return e +} + +type messageInfo struct { + *protogen.Message + + genRawDescMethod bool + genExtRangeMethod bool + + isTracked bool + hasWeak bool +} + +func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo { + m := &messageInfo{Message: message} + m.genRawDescMethod = true + m.genExtRangeMethod = true + m.isTracked = isTrackedMessage(m) + for _, field := range m.Fields { + m.hasWeak = m.hasWeak || field.Desc.IsWeak() + } + return m +} + +// isTrackedMessage reports whether field tracking is enabled on the message. +func isTrackedMessage(m *messageInfo) (tracked bool) { + const trackFieldUse_fieldNumber = 37383685 + + // Decode the option from unknown fields to avoid a dependency on the + // annotation proto from protoc-gen-go. + b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown() + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + if num == trackFieldUse_fieldNumber && typ == protowire.VarintType { + v, _ := protowire.ConsumeVarint(b) + tracked = protowire.DecodeBool(v) + } + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + return tracked +} + +type extensionInfo struct { + *protogen.Extension +} + +func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo { + x := &extensionInfo{Extension: extension} + return x +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go new file mode 100644 index 00000000..b2e3930f --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -0,0 +1,901 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal_gengo is internal to the protobuf module. +package internal_gengo + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/version" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +// SupportedFeatures reports the set of supported protobuf language features. +var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + +// GenerateVersionMarkers specifies whether to generate version markers. +var GenerateVersionMarkers = true + +// Standard library dependencies. 
+const ( + base64Package = protogen.GoImportPath("encoding/base64") + mathPackage = protogen.GoImportPath("math") + reflectPackage = protogen.GoImportPath("reflect") + sortPackage = protogen.GoImportPath("sort") + stringsPackage = protogen.GoImportPath("strings") + syncPackage = protogen.GoImportPath("sync") + timePackage = protogen.GoImportPath("time") + utf8Package = protogen.GoImportPath("unicode/utf8") +) + +// Protobuf library dependencies. +// +// These are declared as an interface type so that they can be more easily +// patched to support unique build environments that impose restrictions +// on the dependencies of generated source code. +var ( + protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") + protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") + protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") + protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson") + protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") + protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry") + protoV1Package goImportPath = protogen.GoImportPath("github.com/golang/protobuf/proto") +) + +type goImportPath interface { + String() string + Ident(string) protogen.GoIdent +} + +// GenerateFile generates the contents of a .pb.go file. +func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + filename := file.GeneratedFilenamePrefix + ".pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + f := newFileInfo(file) + + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number)) + genGeneratedHeader(gen, g, f) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number)) + + packageDoc := genPackageKnownComment(f) + g.P(packageDoc, "package ", f.GoPackageName) + g.P() + + // Emit a static check that enforces a minimum version of the proto package. + if GenerateVersionMarkers { + g.P("const (") + g.P("// Verify that this generated code is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")") + g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")") + g.P(")") + g.P() + + // TODO: Remove this after some soak-in period after the v2 release. + g.P("// This is a compile-time assertion that a sufficiently up-to-date version") + g.P("// of the legacy proto package is being used.") + g.P("const _ = ", protoV1Package.Ident("ProtoPackageIsVersion4")) + g.P() + } + + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + genImport(gen, g, f, imps.Get(i)) + } + for _, enum := range f.allEnums { + genEnum(g, f, enum) + } + for _, message := range f.allMessages { + genMessage(g, f, message) + } + genExtensions(g, f) + + genReflectFileDescriptor(gen, g, f) + + return g +} + +// genStandaloneComments prints all leading comments for a FileDescriptorProto +// location identified by the field number n. 
+func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) { + for _, loc := range f.Proto.GetSourceCodeInfo().GetLocation() { + if len(loc.Path) == 1 && loc.Path[0] == n { + for _, s := range loc.GetLeadingDetachedComments() { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.GetLeadingComments(); s != "" { + g.P(protogen.Comments(s)) + g.P() + } + } + } +} + +func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + + if GenerateVersionMarkers { + g.P("// versions:") + protocGenGoVersion := version.String() + protocVersion := "(unknown)" + if v := gen.Request.GetCompilerVersion(); v != nil { + protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + } + g.P("// \tprotoc-gen-go ", protocGenGoVersion) + g.P("// \tprotoc ", protocVersion) + } + + if f.Proto.GetOptions().GetDeprecated() { + g.P("// ", f.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", f.Desc.Path()) + } + g.P() +} + +func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) { + impFile, ok := gen.FilesByPath[imp.Path()] + if !ok { + return + } + if impFile.GoImportPath == f.GoImportPath { + // Don't generate imports or aliases for types in the same Go package. + return + } + // Generate imports for all non-weak dependencies, even if they are not + // referenced, because other code and tools depend on having the + // full transitive closure of protocol buffer types in the binary. + if !imp.IsWeak { + g.Import(impFile.GoImportPath) + } + if !imp.IsPublic { + return + } + + // Generate public imports by generating the imported file, parsing it, + // and extracting every symbol that should receive a forwarding declaration. + impGen := GenerateFile(gen, impFile) + impGen.Skip() + b, err := impGen.Content() + if err != nil { + gen.Error(err) + return + } + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments) + if err != nil { + gen.Error(err) + return + } + genForward := func(tok token.Token, name string, expr ast.Expr) { + // Don't import unexported symbols. + r, _ := utf8.DecodeRuneInString(name) + if !unicode.IsUpper(r) { + return + } + // Don't import the FileDescriptor. + if name == impFile.GoDescriptorIdent.GoName { + return + } + // Don't import decls referencing a symbol defined in another package. + // i.e., don't import decls which are themselves public imports: + // + // type T = somepackage.T + if _, ok := expr.(*ast.SelectorExpr); ok { + return + } + g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name)) + } + g.P("// Symbols defined in public import of ", imp.Path(), ".") + g.P() + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + genForward(decl.Tok, spec.Name.Name, spec.Type) + case *ast.ValueSpec: + for i, name := range spec.Names { + var expr ast.Expr + if i < len(spec.Values) { + expr = spec.Values[i] + } + genForward(decl.Tok, name.Name, expr) + } + case *ast.ImportSpec: + default: + panic(fmt.Sprintf("can't generate forward for spec type %T", spec)) + } + } + } + } + g.P() +} + +func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + // Enum type declaration. 
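`genGeneratedHeader` above stamps each generated file with the generator and compiler versions (when `GenerateVersionMarkers` is true) and the source path, before the package clause is printed. The preamble of a `.pb.go` file therefore looks roughly like the following; the version numbers, file path, and package name are hypothetical:

```go
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.25.0
// 	protoc v3.13.0
// source: example/greeting.proto

package examplepb
```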
+ g.Annotate(e.GoIdent.GoName, e.Location) + leadingComments := appendDeprecationSuffix(e.Comments.Leading, + e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated()) + g.P(leadingComments, + "type ", e.GoIdent, " int32") + + // Enum value constants. + g.P("const (") + for _, value := range e.Values { + g.Annotate(value.GoIdent.GoName, value.Location) + leadingComments := appendDeprecationSuffix(value.Comments.Leading, + value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated()) + g.P(leadingComments, + value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(), + trailingComment(value.Comments.Trailing)) + } + g.P(")") + g.P() + + // Enum value maps. + g.P("// Enum value maps for ", e.GoIdent, ".") + g.P("var (") + g.P(e.GoIdent.GoName+"_name", " = map[int32]string{") + for _, value := range e.Values { + duplicate := "" + if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) { + duplicate = "// Duplicate value: " + } + g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",") + } + g.P("}") + g.P(e.GoIdent.GoName+"_value", " = map[string]int32{") + for _, value := range e.Values { + g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",") + } + g.P("}") + g.P(")") + g.P() + + // Enum method. + // + // NOTE: A pointer value is needed to represent presence in proto2. + // Since a proto2 message can reference a proto3 enum, it is useful to + // always generate this method (even on proto3 enums) to support that case. + g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {") + g.P("p := new(", e.GoIdent, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + + // String method. + g.P("func (x ", e.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))") + g.P("}") + g.P() + + genEnumReflectMethods(g, f, e) + + // UnmarshalJSON method. + if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 { + g.P("// Deprecated: Do not use.") + g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {") + g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)") + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", e.GoIdent, "(num)") + g.P("return nil") + g.P("}") + g.P() + } + + // EnumDescriptor method. + if e.genRawDescMethod { + var indexes []string + for i := 1; i < len(e.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i]))) + } + g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.") + g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + if m.Desc.IsMapEntry() { + return + } + + // Message type declaration. 
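`genEnum` above emits an `int32`-backed type, prefixed value constants, the `_name`/`_value` lookup maps, and `Enum`/`String` methods for every enum. A trimmed sketch of that shape for a hypothetical `enum Mood { MOOD_UNSPECIFIED = 0; MOOD_HAPPY = 1; }` (the `String`, reflection, and deprecated descriptor methods are omitted here):

```go
package examplepb

type Mood int32

const (
	Mood_MOOD_UNSPECIFIED Mood = 0
	Mood_MOOD_HAPPY       Mood = 1
)

// Enum value maps for Mood.
var (
	Mood_name = map[int32]string{
		0: "MOOD_UNSPECIFIED",
		1: "MOOD_HAPPY",
	}
	Mood_value = map[string]int32{
		"MOOD_UNSPECIFIED": 0,
		"MOOD_HAPPY":       1,
	}
)

func (x Mood) Enum() *Mood {
	p := new(Mood)
	*p = x
	return p
}
```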
+ g.Annotate(m.GoIdent.GoName, m.Location) + leadingComments := appendDeprecationSuffix(m.Comments.Leading, + m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated()) + g.P(leadingComments, + "type ", m.GoIdent, " struct {") + genMessageFields(g, f, m) + g.P("}") + g.P() + + genMessageKnownFunctions(g, f, m) + genMessageDefaultDecls(g, f, m) + genMessageMethods(g, f, m) + genMessageOneofWrapperTypes(g, f, m) +} + +func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + sf := f.allMessageFieldsByPtr[m] + genMessageInternalFields(g, f, m, sf) + for _, field := range m.Fields { + genMessageField(g, f, m, field, sf) + } +} + +func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) { + g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState")) + sf.append(genid.State_goname) + g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache")) + sf.append(genid.SizeCache_goname) + if m.hasWeak { + g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields")) + sf.append(genid.WeakFields_goname) + } + g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields")) + sf.append(genid.UnknownFields_goname) + if m.Desc.ExtensionRanges().Len() > 0 { + g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields")) + sf.append(genid.ExtensionFields_goname) + } + if sf.count > 0 { + g.P() + } +} + +func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) { + if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() { + // It would be a bit simpler to iterate over the oneofs below, + // but generating the field here keeps the contents of the Go + // struct in the same order as the contents of the source + // .proto file. + if oneof.Fields[0] != field { + return // only generate for first appearance + } + + tags := structTags{ + {"protobuf_oneof", string(oneof.Desc.Name())}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + + g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location) + leadingComments := oneof.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)} + for _, field := range oneof.Fields { + ss = append(ss, "\t*"+field.GoIdent.GoName+"\n") + } + leadingComments += protogen.Comments(strings.Join(ss, "")) + g.P(leadingComments, + oneof.GoName, " ", oneofInterfaceName(oneof), tags) + sf.append(oneof.GoName) + return + } + goType, pointer := fieldGoType(g, f, field) + if pointer { + goType = "*" + goType + } + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + {"json", fieldJSONTagValue(field)}, + } + if field.Desc.IsMap() { + key := field.Message.Fields[0] + val := field.Message.Fields[1] + tags = append(tags, structTags{ + {"protobuf_key", fieldProtobufTagValue(key)}, + {"protobuf_val", fieldProtobufTagValue(val)}, + }...) + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ } + + name := field.GoName + if field.Desc.IsWeak() { + name = genid.WeakFieldPrefix_goname + name + } + g.Annotate(m.GoIdent.GoName+"."+name, field.Location) + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + name, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + sf.append(field.GoName) +} + +// genMessageDefaultDecls generates consts and vars holding the default +// values of fields. +func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + var consts, vars []string + for _, field := range m.Fields { + if !field.Desc.HasDefault() { + continue + } + name := "Default_" + m.GoIdent.GoName + "_" + field.GoName + goType, _ := fieldGoType(g, f, field) + defVal := field.Desc.Default() + switch field.Desc.Kind() { + case protoreflect.StringKind: + consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String())) + case protoreflect.BytesKind: + vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes())) + case protoreflect.EnumKind: + idx := field.Desc.DefaultEnumValue().Index() + val := field.Enum.Values[idx] + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + case protoreflect.FloatKind, protoreflect.DoubleKind: + if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { + var fn, arg string + switch f := defVal.Float(); { + case math.IsInf(f, -1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1" + case math.IsInf(f, +1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1" + case math.IsNaN(f): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), "" + } + vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg)) + } else { + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f)) + } + default: + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface())) + } + } + if len(consts) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("const (") + for _, s := range consts { + g.P(s) + } + g.P(")") + } + if len(vars) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("var (") + for _, s := range vars { + g.P(s) + } + g.P(")") + } + g.P() +} + +func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + genMessageBaseMethods(g, f, m) + genMessageGetterMethods(g, f, m) + genMessageSetterMethods(g, f, m) +} + +func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + // Reset method. + g.P("func (x *", m.GoIdent, ") Reset() {") + g.P("*x = ", m.GoIdent, "{}") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {") + g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("}") + g.P() + + // String method. + g.P("func (x *", m.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)") + g.P("}") + g.P() + + // ProtoMessage method. + g.P("func (*", m.GoIdent, ") ProtoMessage() {}") + g.P() + + // ProtoReflect method. + genMessageReflectMethods(g, f, m) + + // Descriptor method. 
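`genMessage`, `genMessageFields`, and `genMessageBaseMethods` above emit the struct (internal `state`/`sizeCache`/`unknownFields` fields first, per `genMessageInternalFields`), the `Reset`/`String`/`ProtoMessage` base methods, and a getter per field. A trimmed sketch for a hypothetical `message Greeting { string text = 1; }`, with the `String`, reflection, and raw-descriptor plumbing left out:

```go
package examplepb

import protoimpl "google.golang.org/protobuf/runtime/protoimpl"

type Greeting struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
}

// Reset in real output also re-registers the protoimpl.MessageInfo when
// protoimpl.UnsafeEnabled is set; that branch is omitted here.
func (x *Greeting) Reset()      { *x = Greeting{} }
func (*Greeting) ProtoMessage() {}

func (x *Greeting) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}
```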
+ if m.genRawDescMethod { + var indexes []string + for i := 1; i < len(m.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i]))) + } + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.") + g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } + + // ExtensionRangeArray method. + extRanges := m.Desc.ExtensionRanges() + if m.genExtRangeMethod && extRanges.Len() > 0 { + protoExtRange := protoifacePackage.Ident("ExtensionRangeV1") + extRangeVar := "extRange_" + m.GoIdent.GoName + g.P("var ", extRangeVar, " = []", protoExtRange, " {") + for i := 0; i < extRanges.Len(); i++ { + r := extRanges.Get(i) + g.P("{Start:", r[0], ", End:", r[1]-1 /* inclusive */, "},") + } + g.P("}") + g.P() + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor.ExtensionRanges instead.") + g.P("func (*", m.GoIdent, ") ExtensionRangeArray() []", protoExtRange, " {") + g.P("return ", extRangeVar) + g.P("}") + g.P() + } +} + +func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + genNoInterfacePragma(g, m.isTracked) + + // Getter for parent oneof. + if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() { + g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location) + g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {") + g.P("if m != nil {") + g.P("return m.", oneof.GoName) + g.P("}") + g.P("return nil") + g.P("}") + g.P() + } + + // Getter for message field. + goType, pointer := fieldGoType(g, f, field) + defaultValue := fieldDefaultValue(g, m, field) + g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + switch { + case field.Desc.IsWeak(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{") + g.P("var w ", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")") + g.P("}") + case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {") + g.P("return x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + default: + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + if !field.Desc.HasPresence() || defaultValue == "nil" { + g.P("if x != nil {") + } else { + g.P("if x != nil && x.", field.GoName, " != nil {") + } + star := "" + if pointer { + star = "*" + } + g.P("return ", star, " x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + } + g.P() + } +} + +func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + if !field.Desc.IsWeak() { + continue + } + + genNoInterfacePragma(g, m.isTracked) + + g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location) + leadingComments := 
appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {") + g.P("var w *", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = &x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)") + g.P("}") + g.P() + } +} + +// fieldGoType returns the Go type used for a field. +// +// If it returns pointer=true, the struct field is a pointer to the type. +func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) { + if field.Desc.IsWeak() { + return "struct{}", false + } + + pointer = field.Desc.HasPresence() + switch field.Desc.Kind() { + case protoreflect.BoolKind: + goType = "bool" + case protoreflect.EnumKind: + goType = g.QualifiedGoIdent(field.Enum.GoIdent) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + goType = "int32" + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + goType = "uint32" + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + goType = "int64" + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + goType = "uint64" + case protoreflect.FloatKind: + goType = "float32" + case protoreflect.DoubleKind: + goType = "float64" + case protoreflect.StringKind: + goType = "string" + case protoreflect.BytesKind: + goType = "[]byte" + pointer = false // rely on nullability of slices for presence + case protoreflect.MessageKind, protoreflect.GroupKind: + goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent) + pointer = false // pointer captured as part of the type + } + switch { + case field.Desc.IsList(): + return "[]" + goType, false + case field.Desc.IsMap(): + keyType, _ := fieldGoType(g, f, field.Message.Fields[0]) + valType, _ := fieldGoType(g, f, field.Message.Fields[1]) + return fmt.Sprintf("map[%v]%v", keyType, valType), false + } + return goType, pointer +} + +func fieldProtobufTagValue(field *protogen.Field) string { + var enumName string + if field.Desc.Kind() == protoreflect.EnumKind { + enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc) + } + return tag.Marshal(field.Desc, enumName) +} + +func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protogen.Field) string { + if field.Desc.IsList() { + return "nil" + } + if field.Desc.HasDefault() { + defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName + if field.Desc.Kind() == protoreflect.BytesKind { + return "append([]byte(nil), " + defVarName + "...)" + } + return defVarName + } + switch field.Desc.Kind() { + case protoreflect.BoolKind: + return "false" + case protoreflect.StringKind: + return `""` + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: + return "nil" + case protoreflect.EnumKind: + return g.QualifiedGoIdent(field.Enum.Values[0].GoIdent) + default: + return "0" + } +} + +func fieldJSONTagValue(field *protogen.Field) string { + return string(field.Desc.Name()) + ",omitempty" +} + +func genExtensions(g *protogen.GeneratedFile, f *fileInfo) { + if len(f.allExtensions) == 0 { + return + } + + g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{") + for _, x := range f.allExtensions { + // For MessageSet extensions, 
the name used is the parent message. + name := x.Desc.FullName() + if messageset.IsMessageSetExtension(x.Desc) { + name = name.Parent() + } + + g.P("{") + g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),") + goType, pointer := fieldGoType(g, f, x.Extension) + if pointer { + goType = "*" + goType + } + g.P("ExtensionType: (", goType, ")(nil),") + g.P("Field: ", x.Desc.Number(), ",") + g.P("Name: ", strconv.Quote(string(name)), ",") + g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",") + g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",") + g.P("},") + } + g.P("}") + g.P() + + // Group extensions by the target message. + var orderedTargets []protogen.GoIdent + allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo) + allExtensionsByPtr := make(map[*extensionInfo]int) + for i, x := range f.allExtensions { + target := x.Extendee.GoIdent + if len(allExtensionsByTarget[target]) == 0 { + orderedTargets = append(orderedTargets, target) + } + allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x) + allExtensionsByPtr[x] = i + } + for _, target := range orderedTargets { + g.P("// Extension fields to ", target, ".") + g.P("var (") + for _, x := range allExtensionsByTarget[target] { + xd := x.Desc + typeName := xd.Kind().String() + switch xd.Kind() { + case protoreflect.EnumKind: + typeName = string(xd.Enum().FullName()) + case protoreflect.MessageKind, protoreflect.GroupKind: + typeName = string(xd.Message().FullName()) + } + fieldName := string(xd.Name()) + + leadingComments := x.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n", + xd.Cardinality(), typeName, fieldName, xd.Number())) + leadingComments = appendDeprecationSuffix(leadingComments, + x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + "E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]", + trailingComment(x.Comments.Trailing)) + } + g.P(")") + g.P() + } +} + +// genMessageOneofWrapperTypes generates the oneof wrapper types and +// associates the types with the parent message type. +func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, oneof := range m.Oneofs { + if oneof.Desc.IsSynthetic() { + continue + } + ifName := oneofInterfaceName(oneof) + g.P("type ", ifName, " interface {") + g.P(ifName, "()") + g.P("}") + g.P() + for _, field := range oneof.Fields { + g.Annotate(field.GoIdent.GoName, field.Location) + g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location) + g.P("type ", field.GoIdent, " struct {") + goType, _ := fieldGoType(g, f, field) + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + field.GoName, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + g.P("}") + g.P() + } + for _, field := range oneof.Fields { + g.P("func (*", field.GoIdent, ") ", ifName, "() {}") + g.P() + } + } +} + +// oneofInterfaceName returns the name of the interface type implemented by +// the oneof field value types. 
+func oneofInterfaceName(oneof *protogen.Oneof) string { + return "is" + oneof.GoIdent.GoName +} + +// genNoInterfacePragma generates a standalone "nointerface" pragma to +// decorate methods with field-tracking support. +func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) { + if tracked { + g.P("//go:nointerface") + g.P() + } +} + +var gotrackTags = structTags{{"go", "track"}} + +// structTags is a data structure for build idiomatic Go struct tags. +// Each [2]string is a key-value pair, where value is the unescaped string. +// +// Example: structTags{{"key", "value"}}.String() -> `key:"value"` +type structTags [][2]string + +func (tags structTags) String() string { + if len(tags) == 0 { + return "" + } + var ss []string + for _, tag := range tags { + // NOTE: When quoting the value, we need to make sure the backtick + // character does not appear. Convert all cases to the escaped hex form. + key := tag[0] + val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1) + ss = append(ss, fmt.Sprintf("%s:%s", key, val)) + } + return "`" + strings.Join(ss, " ") + "`" +} + +// appendDeprecationSuffix optionally appends a deprecation notice as a suffix. +func appendDeprecationSuffix(prefix protogen.Comments, deprecated bool) protogen.Comments { + if !deprecated { + return prefix + } + if prefix != "" { + prefix += "\n" + } + return prefix + " Deprecated: Do not use.\n" +} + +// trailingComment is like protogen.Comments, but lacks a trailing newline. +type trailingComment protogen.Comments + +func (c trailingComment) String() string { + s := strings.TrimSuffix(protogen.Comments(c).String(), "\n") + if strings.Contains(s, "\n") { + // We don't support multi-lined trailing comments as it is unclear + // how to best render them in the generated code. + return "" + } + return s +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go new file mode 100644 index 00000000..1319a126 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go @@ -0,0 +1,351 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor")) + g.P() + + genFileDescriptor(gen, g, f) + if len(f.allEnums) > 0 { + g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")") + } + if len(f.allMessages) > 0 { + g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")") + } + + // Generate a unique list of Go types for all declarations and dependencies, + // and the associated index into the type list for all dependencies. 
+ var goTypes []string + var depIdxs []string + seen := map[protoreflect.FullName]int{} + genDep := func(name protoreflect.FullName, depSource string) { + if depSource != "" { + line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name) + depIdxs = append(depIdxs, line) + } + } + genEnum := func(e *protogen.Enum, depSource string) { + if e != nil { + name := e.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name) + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + genMessage := func(m *protogen.Message, depSource string) { + if m != nil { + name := m.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name) + if m.Desc.IsMapEntry() { + // Map entry messages have no associated Go type. + line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name) + } + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + + // This ordering is significant. + // See filetype.TypeBuilder.DependencyIndexes. + type offsetEntry struct { + start int + name string + } + var depOffsets []offsetEntry + for _, enum := range f.allEnums { + genEnum(enum.Enum, "") + } + for _, message := range f.allMessages { + genMessage(message.Message, "") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"}) + for _, message := range f.allMessages { + for _, field := range message.Fields { + if field.Desc.IsWeak() { + continue + } + source := string(field.Desc.FullName()) + genEnum(field.Enum, source+":type_name") + genMessage(field.Message, source+":type_name") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genMessage(extension.Extendee, source+":extendee") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genEnum(extension.Enum, source+":type_name") + genMessage(extension.Message, source+":type_name") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Input, source+":input_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Output, source+":output_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""}) + for i := len(depOffsets) - 2; i >= 0; i-- { + curr, next := depOffsets[i], depOffsets[i+1] + depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s", + curr.start, curr.start, next.start, curr.name)) + } + if len(depIdxs) > math.MaxInt32 { + panic("too many dependencies") // sanity check + } + + g.P("var ", goTypesVarName(f), " = []interface{}{") + for _, s := range goTypes { + g.P(s) + } + g.P("}") + + g.P("var ", depIdxsVarName(f), " = []int32{") + for _, s := range depIdxs { + g.P(s) + } + g.P("}") + + g.P("func init() { ", initFuncName(f.File), "() }") + + g.P("func ", 
initFuncName(f.File), "() {") + g.P("if ", f.GoDescriptorIdent, " != nil {") + g.P("return") + g.P("}") + + // Ensure that initialization functions for different files in the same Go + // package run in the correct order: Call the init funcs for every .proto file + // imported by this one that is in the same Go package. + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + impFile := gen.FilesByPath[imps.Get(i).Path()] + if impFile.GoImportPath != f.GoImportPath { + continue + } + g.P(initFuncName(impFile), "()") + } + + if len(f.allMessages) > 0 { + // Populate MessageInfo.Exporters. + g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {") + for _, message := range f.allMessages { + if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {") + g.P("switch v := v.(*", message.GoIdent, "); i {") + for i := 0; i < sf.count; i++ { + if name := sf.unexported[i]; name != "" { + g.P("case ", i, ": return &v.", name) + } + } + g.P("default: return nil") + g.P("}") + g.P("}") + } + } + g.P("}") + + // Populate MessageInfo.OneofWrappers. + for _, message := range f.allMessages { + if len(message.Oneofs) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + // Associate the wrapper types by directly passing them to the MessageInfo. + g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {") + for _, oneof := range message.Oneofs { + if !oneof.Desc.IsSynthetic() { + for _, field := range oneof.Fields { + g.P("(*", field.GoIdent, ")(nil),") + } + } + } + g.P("}") + } + } + } + + g.P("type x struct{}") + g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{") + g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{") + g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),") + g.P("RawDescriptor: ", rawDescVarName(f), ",") + g.P("NumEnums: ", len(f.allEnums), ",") + g.P("NumMessages: ", len(f.allMessages), ",") + g.P("NumExtensions: ", len(f.allExtensions), ",") + g.P("NumServices: ", len(f.Services), ",") + g.P("},") + g.P("GoTypes: ", goTypesVarName(f), ",") + g.P("DependencyIndexes: ", depIdxsVarName(f), ",") + if len(f.allEnums) > 0 { + g.P("EnumInfos: ", enumTypesVarName(f), ",") + } + if len(f.allMessages) > 0 { + g.P("MessageInfos: ", messageTypesVarName(f), ",") + } + if len(f.allExtensions) > 0 { + g.P("ExtensionInfos: ", extensionTypesVarName(f), ",") + } + g.P("}.Build()") + g.P(f.GoDescriptorIdent, " = out.File") + + // Set inputs to nil to allow GC to reclaim resources. 
+ g.P(rawDescVarName(f), " = nil") + g.P(goTypesVarName(f), " = nil") + g.P(depIdxsVarName(f), " = nil") + g.P("}") +} + +func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto) + descProto.SourceCodeInfo = nil // drop source code information + + b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto) + if err != nil { + gen.Error(err) + return + } + + g.P("var ", rawDescVarName(f), " = []byte{") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") + g.P() + + if f.needRawDesc { + onceVar := rawDescVarName(f) + "Once" + dataVar := rawDescVarName(f) + "Data" + g.P("var (") + g.P(onceVar, " ", syncPackage.Ident("Once")) + g.P(dataVar, " = ", rawDescVarName(f)) + g.P(")") + g.P() + + g.P("func ", rawDescVarName(f), "GZIP() []byte {") + g.P(onceVar, ".Do(func() {") + g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")") + g.P("})") + g.P("return ", dataVar) + g.P("}") + g.P() + } +} + +func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + idx := f.allEnumsByPtr[e] + typesVar := enumTypesVarName(f) + + // Descriptor method. + g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {") + g.P("return ", typesVar, "[", idx, "].Descriptor()") + g.P("}") + g.P() + + // Type method. + g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {") + g.P("return &", typesVar, "[", idx, "]") + g.P("}") + g.P() + + // Number method. + g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {") + g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)") + g.P("}") + g.P() +} + +func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + idx := f.allMessagesByPtr[m] + typesVar := messageTypesVarName(f) + + // ProtoReflect method. 
+ g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {") + g.P("mi := &", typesVar, "[", idx, "]") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("if ms.LoadMessageInfo() == nil {") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("return ms") + g.P("}") + g.P("return mi.MessageOf(x)") + g.P("}") + g.P() +} + +func fileVarName(f *protogen.File, suffix string) string { + prefix := f.GoDescriptorIdent.GoName + _, n := utf8.DecodeRuneInString(prefix) + prefix = strings.ToLower(prefix[:n]) + prefix[n:] + return prefix + "_" + suffix +} +func rawDescVarName(f *fileInfo) string { + return fileVarName(f.File, "rawDesc") +} +func goTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "goTypes") +} +func depIdxsVarName(f *fileInfo) string { + return fileVarName(f.File, "depIdxs") +} +func enumTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "enumTypes") +} +func messageTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "msgTypes") +} +func extensionTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "extTypes") +} +func initFuncName(f *protogen.File) string { + return fileVarName(f, "init") +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go new file mode 100644 index 00000000..9a1b7bdf --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -0,0 +1,1077 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "strings" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/genid" +) + +// Specialized support for well-known types are hard-coded into the generator +// as opposed to being injected in adjacent .go sources in the generated package +// in order to support specialized build systems like Bazel that always generate +// dynamically from the source .proto files. + +func genPackageKnownComment(f *fileInfo) protogen.Comments { + switch f.Desc.Path() { + case genid.File_google_protobuf_any_proto: + return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `. + + The Any message is a dynamic representation of any other message value. + It is functionally a tuple of the full name of the remote message type and + the serialized bytes of the remote message value. + + + Constructing an Any + + An Any message containing another message value is constructed using New: + + any, err := anypb.New(m) + if err != nil { + ... // handle error + } + ... // make use of any + + + Unmarshaling an Any + + With a populated Any message, the underlying message can be serialized into + a remote concrete message value in a few ways. + + If the exact concrete type is known, then a new (or pre-existing) instance + of that message can be passed to the UnmarshalTo method: + + m := new(foopb.MyMessage) + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... 
// make use of m + + If the exact concrete type is not known, then the UnmarshalNew method can be + used to unmarshal the contents into a new instance of the remote message type: + + m, err := any.UnmarshalNew() + if err != nil { + ... // handle error + } + ... // make use of m + + UnmarshalNew uses the global type registry to resolve the message type and + construct a new instance of that message to unmarshal into. In order for a + message type to appear in the global registry, the Go type representing that + protobuf message type must be linked into the Go binary. For messages + generated by protoc-gen-go, this is achieved through an import of the + generated Go package representing a .proto file. + + A common pattern with UnmarshalNew is to use a type switch with the resulting + proto.Message value: + + switch m := m.(type) { + case *foopb.MyMessage: + ... // make use of m as a *foopb.MyMessage + case *barpb.OtherMessage: + ... // make use of m as a *barpb.OtherMessage + case *bazpb.SomeMessage: + ... // make use of m as a *bazpb.SomeMessage + } + + This pattern ensures that the generated packages containing the message types + listed in the case clauses are linked into the Go binary and therefore also + registered in the global registry. + + + Type checking an Any + + In order to type check whether an Any message represents some other message, + then use the MessageIs method: + + if any.MessageIs((*foopb.MyMessage)(nil)) { + ... // make use of any, knowing that it contains a foopb.MyMessage + } + + The MessageIs method can also be used with an allocated instance of the target + message type if the intention is to unmarshal into it if the type matches: + + m := new(foopb.MyMessage) + if any.MessageIs(m) { + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... // make use of m + } + +` + case genid.File_google_protobuf_timestamp_proto: + return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `. + + The Timestamp message represents a timestamp, + an instant in time since the Unix epoch (January 1st, 1970). + + + Conversion to a Go Time + + The AsTime method can be used to convert a Timestamp message to a + standard Go time.Time value in UTC: + + t := ts.AsTime() + ... // make use of t as a time.Time + + Converting to a time.Time is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsTime method performs the conversion on a best-effort basis. Timestamps + with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) + are normalized during the conversion to a time.Time. To manually check for + invalid Timestamps per the documented limitations in timestamp.proto, + additionally call the CheckValid method: + + if err := ts.CheckValid(); err != nil { + ... // handle error + } + + + Conversion from a Go Time + + The timestamppb.New function can be used to construct a Timestamp message + from a standard Go time.Time value: + + ts := timestamppb.New(t) + ... // make use of ts as a *timestamppb.Timestamp + + In order to construct a Timestamp representing the current time, use Now: + + ts := timestamppb.Now() + ... // make use of ts as a *timestamppb.Timestamp + +` + case genid.File_google_protobuf_duration_proto: + return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `. + + The Duration message represents a signed span of time. 
+ + + Conversion to a Go Duration + + The AsDuration method can be used to convert a Duration message to a + standard Go time.Duration value: + + d := dur.AsDuration() + ... // make use of d as a time.Duration + + Converting to a time.Duration is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsDuration method performs the conversion on a best-effort basis. + Durations with denormal values (e.g., nanoseconds beyond -99999999 and + +99999999, inclusive; or seconds and nanoseconds with opposite signs) + are normalized during the conversion to a time.Duration. To manually check for + invalid Duration per the documented limitations in duration.proto, + additionally call the CheckValid method: + + if err := dur.CheckValid(); err != nil { + ... // handle error + } + + Note that the documented limitations in duration.proto does not protect a + Duration from overflowing the representable range of a time.Duration in Go. + The AsDuration method uses saturation arithmetic such that an overflow clamps + the resulting value to the closest representable value (e.g., math.MaxInt64 + for positive overflow and math.MinInt64 for negative overflow). + + + Conversion from a Go Duration + + The durationpb.New function can be used to construct a Duration message + from a standard Go time.Duration value: + + dur := durationpb.New(d) + ... // make use of d as a *durationpb.Duration + +` + case genid.File_google_protobuf_struct_proto: + return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `. + + The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are + used to represent arbitrary JSON. The Value message represents a JSON value, + the Struct message represents a JSON object, and the ListValue message + represents a JSON array. See https://json.org for more information. + + The Value, Struct, and ListValue types have generated MarshalJSON and + UnmarshalJSON methods such that they serialize JSON equivalent to what the + messages themselves represent. Use of these types with the + "google.golang.org/protobuf/encoding/protojson" package + ensures that they will be serialized as their JSON equivalent. + + + Conversion to and from a Go interface + + The standard Go "encoding/json" package has functionality to serialize + arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and + ListValue.AsSlice methods can convert the protobuf message representation into + a form represented by interface{}, map[string]interface{}, and []interface{}. + This form can be used with other packages that operate on such data structures + and also directly with the standard json package. + + In order to convert the interface{}, map[string]interface{}, and []interface{} + forms back as Value, Struct, and ListValue messages, use the NewStruct, + NewList, and NewValue constructor functions. 
+ + + Example usage + + Consider the following example JSON object: + + { + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [ + { + "type": "home", + "number": "212 555-1234" + }, + { + "type": "office", + "number": "646 555-4567" + } + ], + "children": [], + "spouse": null + } + + To construct a Value message representing the above JSON object: + + m, err := structpb.NewValue(map[string]interface{}{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": map[string]interface{}{ + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100", + }, + "phoneNumbers": []interface{}{ + map[string]interface{}{ + "type": "home", + "number": "212 555-1234", + }, + map[string]interface{}{ + "type": "office", + "number": "646 555-4567", + }, + }, + "children": []interface{}{}, + "spouse": nil, + }) + if err != nil { + ... // handle error + } + ... // make use of m as a *structpb.Value + +` + case genid.File_google_protobuf_field_mask_proto: + return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `. + + The FieldMask message represents a set of symbolic field paths. + The paths are specific to some target message type, + which is not stored within the FieldMask message itself. + + + Constructing a FieldMask + + The New function is used construct a FieldMask: + + var messageType *descriptorpb.DescriptorProto + fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") + if err != nil { + ... // handle error + } + ... // make use of fm + + The "field.name" and "field.number" paths are valid paths according to the + google.protobuf.DescriptorProto message. Use of a path that does not correlate + to valid fields reachable from DescriptorProto would result in an error. + + Once a FieldMask message has been constructed, + the Append method can be used to insert additional paths to the path set: + + var messageType *descriptorpb.DescriptorProto + if err := fm.Append(messageType, "options"); err != nil { + ... // handle error + } + + + Type checking a FieldMask + + In order to verify that a FieldMask represents a set of fields that are + reachable from some target message type, use the IsValid method: + + var messageType *descriptorpb.DescriptorProto + if fm.IsValid(messageType) { + ... // make use of fm + } + + IsValid needs to be passed the target message type as an input since the + FieldMask message itself does not store the message type that the set of paths + are for. 
+` + default: + return "" + } +} + +func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + switch m.Desc.FullName() { + case genid.Any_message_fullname: + g.P("// New marshals src into a new Any instance.") + g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {") + g.P(" dst := new(Any)") + g.P(" if err := dst.MarshalFrom(src); err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return dst, nil") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals src into dst as the underlying message") + g.P("// using the provided marshal options.") + g.P("//") + g.P("// If no options are specified, call dst.MarshalFrom instead.") + g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {") + g.P(" const urlPrefix = \"type.googleapis.com/\"") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" b, err := opts.Marshal(src)") + g.P(" if err != nil {") + g.P(" return err") + g.P(" }") + g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())") + g.P(" dst.Value = b") + g.P(" return nil") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the underlying message from src into dst") + g.P("// using the provided unmarshal options.") + g.P("// It reports an error if dst is not of the right message type.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalTo instead.") + g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" if !src.MessageIs(dst) {") + g.P(" got := dst.ProtoReflect().Descriptor().FullName()") + g.P(" want := src.MessageName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)") + g.P(" }") + g.P(" return opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the underlying message from src into dst,") + g.P("// which is newly created message using a type resolved from the type URL.") + g.P("// The message type is resolved according to opt.Resolver,") + g.P("// which should implement protoregistry.MessageTypeResolver.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalNew instead.") + g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {") + g.P(" if src.GetTypeUrl() == \"\" {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")") + g.P(" }") + g.P(" if opts.Resolver == nil {") + g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes")) + g.P(" }") + g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")") + g.P(" if !ok {") + g.P(" return nil, ", protoregistryPackage.Ident("NotFound")) + g.P(" }") + g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())") + g.P(" if err != nil {") + g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {") + g.P(" return nil, err") + g.P(" }") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)") + g.P(" }") + g.P(" dst = mt.New().Interface()") + 
g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// MessageIs reports whether the underlying message is of the same type as m.") + g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" if m == nil {") + g.P(" return false") + g.P(" }") + g.P(" url := x.GetTypeUrl()") + g.P(" name := string(m.ProtoReflect().Descriptor().FullName())") + g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {") + g.P(" return false") + g.P(" }") + g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'") + g.P("}") + g.P() + + g.P("// MessageName reports the full name of the underlying message,") + g.P("// returning an empty string if invalid.") + g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {") + g.P(" url := x.GetTypeUrl()") + g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)") + g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {") + g.P(" name = name[i+len(\"/\"):]") + g.P(" }") + g.P(" if !name.IsValid() {") + g.P(" return \"\"") + g.P(" }") + g.P(" return name") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals m into x as the underlying message.") + g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.") + g.P("// It resets m before performing the unmarshal operation.") + g.P("// It reports an error if m is not of the right message type.") + g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into") + g.P("// a newly allocated message of the specified type.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {") + g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + case genid.Timestamp_message_fullname: + g.P("// Now constructs a new Timestamp from the current time.") + g.P("func Now() *Timestamp {") + g.P(" return New(", timePackage.Ident("Now"), "())") + g.P("}") + g.P() + + g.P("// New constructs a new Timestamp from the provided time.Time.") + g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {") + g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}") + g.P("}") + g.P() + + g.P("// AsTime converts x to a time.Time.") + g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {") + g.P(" return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()") + g.P("}") + g.P() + + g.P("// IsValid reports whether the timestamp is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Timestamp) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the timestamp is invalid.") + g.P("// In particular, it checks whether the value represents a date that is") + g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.") + g.P("// An error is reported for a nil Timestamp.") + g.P("func (x *Timestamp) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", 
protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) before 0001-01-01\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)") + g.P(" case invalidNanos:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanos") + g.P(")") + g.P() + + g.P("func (x *Timestamp) check() uint {") + g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive") + g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < minTimestamp:") + g.P(" return invalidUnderflow") + g.P(" case secs > maxTimestamp:") + g.P(" return invalidOverflow") + g.P(" case nanos < 0 || nanos >= 1e9:") + g.P(" return invalidNanos") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Duration_message_fullname: + g.P("// New constructs a new Duration from the provided time.Duration.") + g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {") + g.P(" nanos := d.Nanoseconds()") + g.P(" secs := nanos / 1e9") + g.P(" nanos -= secs * 1e9") + g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}") + g.P("}") + g.P() + + g.P("// AsDuration converts x to a time.Duration,") + g.P("// returning the closest duration value in the event of overflow.") + g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second")) + g.P(" overflow := d/", timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)") + g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond")) + g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)") + g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)") + g.P(" if overflow {") + g.P(" switch {") + g.P(" case secs < 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")") + g.P(" case secs > 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")") + g.P(" }") + g.P(" }") + g.P(" return d") + g.P("}") + g.P() + + g.P("// IsValid reports whether the duration is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Duration) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the duration is invalid.") + g.P("// In particular, it checks whether the value is within the range of") + g.P("// -10000 years to +10000 years inclusive.") + g.P("// An error is reported for a nil Duration.") + g.P("func (x *Duration) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration 
(%v) exceeds -10000 years\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)") + g.P(" case invalidNanosRange:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)") + g.P(" case invalidNanosSign:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanosRange") + g.P(" invalidNanosSign") + g.P(")") + g.P() + + g.P("func (x *Duration) check() uint {") + g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < -absDuration:") + g.P(" return invalidUnderflow") + g.P(" case secs > +absDuration:") + g.P(" return invalidOverflow") + g.P(" case nanos <= -1e9 || nanos >= +1e9:") + g.P(" return invalidNanosRange") + g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):") + g.P(" return invalidNanosSign") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Struct_message_fullname: + g.P("// NewStruct constructs a Struct from a general-purpose Go map.") + g.P("// The map keys must be valid UTF-8.") + g.P("// The map values are converted using NewValue.") + g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") + g.P(" for k, v := range v {") + g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)") + g.P(" }") + g.P(" var err error") + g.P(" x.Fields[k], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsMap converts x to a general-purpose Go map.") + g.P("// The map values are converted by calling Value.AsInterface.") + g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P(" vs := make(map[string]interface{})") + g.P(" for k, v := range x.GetFields() {") + g.P(" vs[k] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *Struct) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Struct) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.ListValue_message_fullname: + g.P("// NewList constructs a ListValue from a general-purpose Go slice.") + g.P("// The slice elements are converted using NewValue.") + g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P(" x := &ListValue{Values: make([]*Value, len(v))}") + g.P(" for i, v := range v {") + g.P(" var err error") + g.P(" x.Values[i], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsSlice converts x to a general-purpose Go slice.") + g.P("// The slice elements are converted by calling Value.AsInterface.") + g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P(" vs := make([]interface{}, len(x.GetValues()))") + g.P(" 
for i, v := range x.GetValues() {") + g.P(" vs[i] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.Value_message_fullname: + g.P("// NewValue constructs a Value from a general-purpose Go interface.") + g.P("//") + g.P("// ╔════════════════════════╤════════════════════════════════════════════╗") + g.P("// ║ Go type │ Conversion ║") + g.P("// ╠════════════════════════╪════════════════════════════════════════════╣") + g.P("// ║ nil │ stored as NullValue ║") + g.P("// ║ bool │ stored as BoolValue ║") + g.P("// ║ int, int32, int64 │ stored as NumberValue ║") + g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║") + g.P("// ║ float32, float64 │ stored as NumberValue ║") + g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") + g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") + g.P("// ║ map[string]interface{} │ stored as StructValue ║") + g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") + g.P("//") + g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss") + g.P("// is possible since they are stored as a float64.") + g.P("func NewValue(v interface{}) (*Value, error) {") + g.P(" switch v := v.(type) {") + g.P(" case nil:") + g.P(" return NewNullValue(), nil") + g.P(" case bool:") + g.P(" return NewBoolValue(v), nil") + g.P(" case int:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case string:") + g.P(" if !", utf8Package.Ident("ValidString"), "(v) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)") + g.P(" }") + g.P(" return NewStringValue(v), nil") + g.P(" case []byte:") + g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") + g.P(" return NewStringValue(s), nil") + g.P(" case map[string]interface{}:") + g.P(" v2, err := NewStruct(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewStructValue(v2), nil") + g.P(" case []interface{}:") + g.P(" v2, err := NewList(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewListValue(v2), nil") + g.P(" default:") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)") + g.P(" }") + g.P("}") + g.P() + + g.P("// NewNullValue constructs a new null Value.") + g.P("func NewNullValue() *Value {") + g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}") + g.P("}") + g.P() + + g.P("// NewBoolValue constructs a new boolean Value.") + g.P("func NewBoolValue(v bool) *Value {") + g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}") + g.P("}") + g.P() + + g.P("// NewNumberValue constructs a new number 
Value.") + g.P("func NewNumberValue(v float64) *Value {") + g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}") + g.P("}") + g.P() + + g.P("// NewStringValue constructs a new string Value.") + g.P("func NewStringValue(v string) *Value {") + g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}") + g.P("}") + g.P() + + g.P("// NewStructValue constructs a new struct Value.") + g.P("func NewStructValue(v *Struct) *Value {") + g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}") + g.P("}") + g.P() + + g.P("// NewListValue constructs a new list Value.") + g.P("func NewListValue(v *ListValue) *Value {") + g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}") + g.P("}") + g.P() + + g.P("// AsInterface converts x to a general-purpose Go interface.") + g.P("//") + g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce") + g.P("// semantically equivalent JSON (assuming no errors occur).") + g.P("//") + g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") + g.P("// converted as strings to remain compatible with MarshalJSON.") + g.P("func (x *Value) AsInterface() interface{} {") + g.P(" switch v := x.GetKind().(type) {") + g.P(" case *Value_NumberValue:") + g.P(" if v != nil {") + g.P(" switch {") + g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):") + g.P(" return \"NaN\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, +1):") + g.P(" return \"Infinity\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):") + g.P(" return \"-Infinity\"") + g.P(" default:") + g.P(" return v.NumberValue") + g.P(" }") + g.P(" }") + g.P(" case *Value_StringValue:") + g.P(" if v != nil {") + g.P(" return v.StringValue") + g.P(" }") + g.P(" case *Value_BoolValue:") + g.P(" if v != nil {") + g.P(" return v.BoolValue") + g.P(" }") + g.P(" case *Value_StructValue:") + g.P(" if v != nil {") + g.P(" return v.StructValue.AsMap()") + g.P(" }") + g.P(" case *Value_ListValue:") + g.P(" if v != nil {") + g.P(" return v.ListValue.AsSlice()") + g.P(" }") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func (x *Value) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Value) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.FieldMask_message_fullname: + g.P("// New constructs a field mask from a list of paths and verifies that") + g.P("// each one is valid according to the specified message type.") + g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {") + g.P(" x := new(FieldMask)") + g.P(" return x, x.Append(m, paths...)") + g.P("}") + g.P() + + g.P("// Union returns the union of all the paths in the input field masks.") + g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var out []string") + g.P(" out = append(out, mx.GetPaths()...)") + g.P(" out = append(out, my.GetPaths()...)") + g.P(" for _, m := range ms {") + g.P(" out = append(out, m.GetPaths()...)") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// Intersect returns the intersection of all the paths in the input field masks.") + g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var ss1, ss2 []string // reused buffers for performance") + g.P(" intersect := func(out, in []string) []string {") + g.P(" ss1 
= normalizePaths(append(ss1[:0], in...))") + g.P(" ss2 = normalizePaths(append(ss2[:0], out...))") + g.P(" out = out[:0]") + g.P(" for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {") + g.P(" switch s1, s2 := ss1[i1], ss2[i2]; {") + g.P(" case hasPathPrefix(s1, s2):") + g.P(" out = append(out, s1)") + g.P(" i1++") + g.P(" case hasPathPrefix(s2, s1):") + g.P(" out = append(out, s2)") + g.P(" i2++") + g.P(" case lessPath(s1, s2):") + g.P(" i1++") + g.P(" case lessPath(s2, s1):") + g.P(" i2++") + g.P(" }") + g.P(" }") + g.P(" return out") + g.P(" }") + g.P() + g.P(" out := Union(mx, my, ms...).GetPaths()") + g.P(" out = intersect(out, mx.GetPaths())") + g.P(" out = intersect(out, my.GetPaths())") + g.P(" for _, m := range ms {") + g.P(" out = intersect(out, m.GetPaths())") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// IsValid reports whether all the paths are syntactically valid and") + g.P("// refer to known fields in the specified message type.") + g.P("// It reports false for a nil FieldMask.") + g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" paths := x.GetPaths()") + g.P(" return x != nil && numValidPaths(m, paths) == len(paths)") + g.P("}") + g.P() + + g.P("// Append appends a list of paths to the mask and verifies that each one") + g.P("// is valid according to the specified message type.") + g.P("// An invalid path is not appended and breaks insertion of subsequent paths.") + g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {") + g.P(" numValid := numValidPaths(m, paths)") + g.P(" x.Paths = append(x.Paths, paths[:numValid]...)") + g.P(" paths = paths[numValid:]") + g.P(" if len(paths) > 0 {") + g.P(" name := m.ProtoReflect().Descriptor().FullName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid path %q for message %q\", paths[0], name)") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {") + g.P(" md0 := m.ProtoReflect().Descriptor()") + g.P(" for i, path := range paths {") + g.P(" md := md0") + g.P(" if !rangeFields(path, func(field string) bool {") + g.P(" // Search the field within the message.") + g.P(" if md == nil {") + g.P(" return false // not within a message") + g.P(" }") + g.P(" fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))") + g.P(" // The real field name of a group is the message name.") + g.P(" if fd == nil {") + g.P(" gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))") + g.P(" if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(gd.Message().Name()) == field {") + g.P(" fd = gd") + g.P(" }") + g.P(" } else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {") + g.P(" fd = nil") + g.P(" }") + g.P(" if fd == nil {") + g.P(" return false // message has does not have this field") + g.P(" }") + g.P() + g.P(" // Identify the next message to search within.") + g.P(" md = fd.Message() // may be nil") + g.P(" if fd.IsMap() {") + g.P(" md = fd.MapValue().Message() // may be nil") + g.P(" }") + g.P(" return true") + g.P(" }) {") + g.P(" return i") + g.P(" }") + g.P(" }") + g.P(" return len(paths)") + g.P("}") + g.P() + + g.P("// Normalize converts the mask to its canonical form where all paths are sorted") + g.P("// and redundant paths are removed.") + g.P("func 
(x *FieldMask) Normalize() {") + g.P(" x.Paths = normalizePaths(x.Paths)") + g.P("}") + g.P() + g.P("func normalizePaths(paths []string) []string {") + g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {") + g.P(" return lessPath(paths[i], paths[j])") + g.P(" })") + g.P() + g.P(" // Elide any path that is a prefix match on the previous.") + g.P(" out := paths[:0]") + g.P(" for _, path := range paths {") + g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {") + g.P(" continue") + g.P(" }") + g.P(" out = append(out, path)") + g.P(" }") + g.P(" return out") + g.P("}") + g.P() + + g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either") + g.P("// an exact matche or that the prefix is delimited by a dot.") + g.P("func hasPathPrefix(path, prefix string) bool {") + g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')") + g.P("}") + g.P() + + g.P("// lessPath is a lexicographical comparison where dot is specially treated") + g.P("// as the smallest symbol.") + g.P("func lessPath(x, y string) bool {") + g.P(" for i := 0; i < len(x) && i < len(y); i++ {") + g.P(" if x[i] != y[i] {") + g.P(" return (x[i] - '.') < (y[i] - '.')") + g.P(" }") + g.P(" }") + g.P(" return len(x) < len(y)") + g.P("}") + g.P() + + g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by") + g.P("// iterating over each field in place and calling a iterator function.") + g.P("func rangeFields(path string, f func(field string) bool) bool {") + g.P(" for {") + g.P(" var field string") + g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {") + g.P(" field, path = path[:i], path[i:]") + g.P(" } else {") + g.P(" field, path = path, \"\"") + g.P(" }") + g.P() + g.P(" if !f(field) {") + g.P(" return false") + g.P(" }") + g.P() + g.P(" if len(path) == 0 {") + g.P(" return true") + g.P(" }") + g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")") + g.P(" }") + g.P("}") + g.P() + + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value") + typeName := strings.ToLower(funcName) + switch typeName { + case "float": + typeName = "float32" + case "double": + typeName = "float64" + case "bytes": + typeName = "[]byte" + } + + g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.") + g.P("func ", funcName, "(v ", typeName, ") *", m.GoIdent, " {") + g.P(" return &", m.GoIdent, "{Value: v}") + g.P("}") + g.P() + } +} diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go new file mode 100644 index 00000000..3892d058 --- /dev/null +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -0,0 +1,1419 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protogen provides support for writing protoc plugins. 
+// +// Plugins for protoc, the Protocol Buffer compiler, +// are programs which read a CodeGeneratorRequest message from standard input +// and write a CodeGeneratorResponse message to standard output. +// This package provides support for writing plugins which generate Go code. +package protogen + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +const goPackageDocURL = "https://developers.google.com/protocol-buffers/docs/reference/go-generated#package" + +// Run executes a function as a protoc plugin. +// +// It reads a CodeGeneratorRequest message from os.Stdin, invokes the plugin +// function, and writes a CodeGeneratorResponse message to os.Stdout. +// +// If a failure occurs while reading or writing, Run prints an error to +// os.Stderr and calls os.Exit(1). +func (opts Options) Run(f func(*Plugin) error) { + if err := run(opts, f); err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), err) + os.Exit(1) + } +} + +func run(opts Options, f func(*Plugin) error) error { + if len(os.Args) > 1 { + return fmt.Errorf("unknown argument %q (this program should be run by protoc, not directly)", os.Args[1]) + } + in, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + req := &pluginpb.CodeGeneratorRequest{} + if err := proto.Unmarshal(in, req); err != nil { + return err + } + gen, err := opts.New(req) + if err != nil { + return err + } + if err := f(gen); err != nil { + // Errors from the plugin function are reported by setting the + // error field in the CodeGeneratorResponse. + // + // In contrast, errors that indicate a problem in protoc + // itself (unparsable input, I/O errors, etc.) are reported + // to stderr. + gen.Error(err) + } + resp := gen.Response() + out, err := proto.Marshal(resp) + if err != nil { + return err + } + if _, err := os.Stdout.Write(out); err != nil { + return err + } + return nil +} + +// A Plugin is a protoc plugin invocation. +type Plugin struct { + // Request is the CodeGeneratorRequest provided by protoc. + Request *pluginpb.CodeGeneratorRequest + + // Files is the set of files to generate and everything they import. + // Files appear in topological order, so each file appears before any + // file that imports it. + Files []*File + FilesByPath map[string]*File + + // SupportedFeatures is the set of protobuf language features supported by + // this generator plugin. See the documentation for + // google.protobuf.CodeGeneratorResponse.supported_features for details. + SupportedFeatures uint64 + + fileReg *protoregistry.Files + enumsByName map[protoreflect.FullName]*Enum + messagesByName map[protoreflect.FullName]*Message + annotateCode bool + pathType pathType + module string + genFiles []*GeneratedFile + opts Options + err error +} + +type Options struct { + // If ParamFunc is non-nil, it will be called with each unknown + // generator parameter. 
+ // + // Plugins for protoc can accept parameters from the command line, + // passed in the --<lang>_out protoc, separated from the output + // directory with a colon; e.g., + // + // --go_out=<param1>=<value1>,<param2>=<value2>:<output_directory> + // + // Parameters passed in this fashion as a comma-separated list of + // key=value pairs will be passed to the ParamFunc. + // + // The (flag.FlagSet).Set method matches this function signature, + // so parameters can be converted into flags as in the following: + // + // var flags flag.FlagSet + // value := flags.Bool("param", false, "") + // opts := &protogen.Options{ + // ParamFunc: flags.Set, + // } + // protogen.Run(opts, func(p *protogen.Plugin) error { + // if *value { ... } + // }) + ParamFunc func(name, value string) error + + // ImportRewriteFunc is called with the import path of each package + // imported by a generated file. It returns the import path to use + // for this package. + ImportRewriteFunc func(GoImportPath) GoImportPath +} + +// New returns a new Plugin. +func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) { + gen := &Plugin{ + Request: req, + FilesByPath: make(map[string]*File), + fileReg: new(protoregistry.Files), + enumsByName: make(map[protoreflect.FullName]*Enum), + messagesByName: make(map[protoreflect.FullName]*Message), + opts: opts, + } + + packageNames := make(map[string]GoPackageName) // filename -> package name + importPaths := make(map[string]GoImportPath) // filename -> import path + mfiles := make(map[string]bool) // filename set + var packageImportPath GoImportPath + for _, param := range strings.Split(req.GetParameter(), ",") { + var value string + if i := strings.Index(param, "="); i >= 0 { + value = param[i+1:] + param = param[0:i] + } + switch param { + case "": + // Ignore. + case "import_path": + packageImportPath = GoImportPath(value) + case "module": + gen.module = value + case "paths": + switch value { + case "import": + gen.pathType = pathTypeImport + case "source_relative": + gen.pathType = pathTypeSourceRelative + default: + return nil, fmt.Errorf(`unknown path type %q: want "import" or "source_relative"`, value) + } + case "annotate_code": + switch value { + case "true", "": + gen.annotateCode = true + case "false": + default: + return nil, fmt.Errorf(`bad value for parameter %q: want "true" or "false"`, param) + } + default: + if param[0] == 'M' { + if i := strings.Index(value, ";"); i >= 0 { + pkgName := GoPackageName(value[i+1:]) + if otherName, ok := packageNames[param[1:]]; ok && pkgName != otherName { + return nil, fmt.Errorf("inconsistent package names for %q: %q != %q", value[:i], pkgName, otherName) + } + packageNames[param[1:]] = pkgName + value = value[:i] + } + importPaths[param[1:]] = GoImportPath(value) + mfiles[param[1:]] = true + continue + } + if opts.ParamFunc != nil { + if err := opts.ParamFunc(param, value); err != nil { + return nil, err + } + } + } + } + if gen.module != "" { + // When the module= option is provided, we strip the module name + // prefix from generated files. This only makes sense if generated + // filenames are based on the import path, so default to paths=import + // and complain if source_relative was selected manually. + switch gen.pathType { + case pathTypeLegacy: + gen.pathType = pathTypeImport + case pathTypeSourceRelative: + return nil, fmt.Errorf("cannot use module= with paths=source_relative") + } + } + + // Figure out the import path and package name for each file. + // + // The rules here are complicated and have grown organically over time.
+ // Interactions between different ways of specifying package information + // may be surprising. + // + // The recommended approach is to include a go_package option in every + // .proto source file specifying the full import path of the Go package + // associated with this file. + // + // option go_package = "google.golang.org/protobuf/types/known/anypb"; + // + // Build systems which want to exert full control over import paths may + // specify M<filename>=<import_path> flags. + // + // Other approaches are not recommended. + generatedFileNames := make(map[string]bool) + for _, name := range gen.Request.FileToGenerate { + generatedFileNames[name] = true + } + // We need to determine the import paths before the package names, + // because the Go package name for a file is sometimes derived from + // a different file in the same package. + packageNameForImportPath := make(map[GoImportPath]GoPackageName) + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + packageName, importPath := goPackageOption(fdesc) + switch { + case importPaths[filename] != "": + // Command line: Mfoo.proto=quux/bar + // + // Explicit mapping of source file to import path. + case generatedFileNames[filename] && packageImportPath != "": + // Command line: import_path=quux/bar + // + // The import_path flag sets the import path for every file that + // we generate code for. + importPaths[filename] = packageImportPath + case importPath != "": + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + importPaths[filename] = importPath + default: + // Source filename. + // + // Last resort when nothing else is available. + importPaths[filename] = GoImportPath(path.Dir(filename)) + } + if packageName != "" { + packageNameForImportPath[importPaths[filename]] = packageName + } + } + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + packageName, importPath := goPackageOption(fdesc) + defaultPackageName := packageNameForImportPath[importPaths[filename]] + switch { + case packageNames[filename] != "": + // A package name specified by the "M" command-line argument. + case packageName != "": + // TODO: For the "M" command-line argument, this means that the + // package name can be derived from the go_package option. + // Go package information should either consistently come from the + // command-line or the .proto source file, but not both. + // See how to make this consistent. + + // Source file: option go_package = "quux/bar"; + packageNames[filename] = packageName + case defaultPackageName != "": + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + packageNames[filename] = defaultPackageName + case generatedFileNames[filename] && packageImportPath != "": + // Command line: import_path=quux/bar + packageNames[filename] = cleanPackageName(path.Base(string(packageImportPath))) + case fdesc.GetPackage() != "": + // Source file: package quux.bar; + packageNames[filename] = cleanPackageName(fdesc.GetPackage()) + default: + // Source filename.
+ packageNames[filename] = cleanPackageName(baseName(filename)) + } + + goPkgOpt := string(importPaths[filename]) + if path.Base(string(goPkgOpt)) != string(packageNames[filename]) { + goPkgOpt += ";" + string(packageNames[filename]) + } + switch { + case packageImportPath != "": + // Command line: import_path=quux/bar + warn("Deprecated use of the 'import_path' command-line argument. In %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will no longer support the 'import_path' argument.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName(), goPkgOpt) + case mfiles[filename]: + // Command line: M=foo.proto=quux/bar + case packageName != "" && importPath == "": + // Source file: option go_package = "quux"; + warn("Deprecated use of 'go_package' option without a full import path in %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will require the import path be specified.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName(), goPkgOpt) + case packageName == "" && importPath == "": + // No Go package information provided. + dotIdx := strings.Index(goPkgOpt, ".") // heuristic for top-level domain + slashIdx := strings.Index(goPkgOpt, "/") // heuristic for multi-segment path + if isFull := 0 <= dotIdx && dotIdx <= slashIdx; isFull { + warn("Missing 'go_package' option in %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will require this be specified.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName(), goPkgOpt) + } else { + warn("Missing 'go_package' option in %q,\n"+ + "please specify it with the full Go package path as\n"+ + "a future release of protoc-gen-go will require this be specified.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName()) + } + } + } + + // Consistency check: Every file with the same Go import path should have + // the same Go package name. + packageFiles := make(map[GoImportPath][]string) + for filename, importPath := range importPaths { + if _, ok := packageNames[filename]; !ok { + // Skip files mentioned in a M= parameter + // but which do not appear in the CodeGeneratorRequest. + continue + } + packageFiles[importPath] = append(packageFiles[importPath], filename) + } + for importPath, filenames := range packageFiles { + for i := 1; i < len(filenames); i++ { + if a, b := packageNames[filenames[0]], packageNames[filenames[i]]; a != b { + return nil, fmt.Errorf("Go package %v has inconsistent names %v (%v) and %v (%v)", + importPath, a, filenames[0], b, filenames[i]) + } + } + } + + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + if gen.FilesByPath[filename] != nil { + return nil, fmt.Errorf("duplicate file name: %q", filename) + } + f, err := newFile(gen, fdesc, packageNames[filename], importPaths[filename]) + if err != nil { + return nil, err + } + gen.Files = append(gen.Files, f) + gen.FilesByPath[filename] = f + } + for _, filename := range gen.Request.FileToGenerate { + f, ok := gen.FilesByPath[filename] + if !ok { + return nil, fmt.Errorf("no descriptor for generated file: %v", filename) + } + f.Generate = true + } + return gen, nil +} + +// Error records an error in code generation. The generator will report the +// error back to protoc and will not produce output. +func (gen *Plugin) Error(err error) { + if gen.err == nil { + gen.err = err + } +} + +// Response returns the generator output. 
+func (gen *Plugin) Response() *pluginpb.CodeGeneratorResponse { + resp := &pluginpb.CodeGeneratorResponse{} + if gen.err != nil { + resp.Error = proto.String(gen.err.Error()) + return resp + } + for _, g := range gen.genFiles { + if g.skip { + continue + } + content, err := g.Content() + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + filename := g.filename + if gen.module != "" { + trim := gen.module + "/" + if !strings.HasPrefix(filename, trim) { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(fmt.Sprintf("%v: generated file does not match prefix %q", filename, gen.module)), + } + } + filename = strings.TrimPrefix(filename, trim) + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename), + Content: proto.String(string(content)), + }) + if gen.annotateCode && strings.HasSuffix(g.filename, ".go") { + meta, err := g.metaFile(content) + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename + ".meta"), + Content: proto.String(meta), + }) + } + } + if gen.SupportedFeatures > 0 { + resp.SupportedFeatures = proto.Uint64(gen.SupportedFeatures) + } + return resp +} + +// A File describes a .proto source file. +type File struct { + Desc protoreflect.FileDescriptor + Proto *descriptorpb.FileDescriptorProto + + GoDescriptorIdent GoIdent // name of Go variable for the file descriptor + GoPackageName GoPackageName // name of this file's Go package + GoImportPath GoImportPath // import path of this file's Go package + + Enums []*Enum // top-level enum declarations + Messages []*Message // top-level message declarations + Extensions []*Extension // top-level extension declarations + Services []*Service // top-level service declarations + + Generate bool // true if we should generate code for this file + + // GeneratedFilenamePrefix is used to construct filenames for generated + // files associated with this source file. + // + // For example, the source file "dir/foo.proto" might have a filename prefix + // of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go". + GeneratedFilenamePrefix string + + comments map[pathKey]CommentSet +} + +func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) { + desc, err := protodesc.NewFile(p, gen.fileReg) + if err != nil { + return nil, fmt.Errorf("invalid FileDescriptorProto %q: %v", p.GetName(), err) + } + if err := gen.fileReg.RegisterFile(desc); err != nil { + return nil, fmt.Errorf("cannot register descriptor %q: %v", p.GetName(), err) + } + f := &File{ + Desc: desc, + Proto: p, + GoPackageName: packageName, + GoImportPath: importPath, + comments: make(map[pathKey]CommentSet), + } + + // Determine the prefix for generated Go files. + prefix := p.GetName() + if ext := path.Ext(prefix); ext == ".proto" || ext == ".protodevel" { + prefix = prefix[:len(prefix)-len(ext)] + } + switch gen.pathType { + case pathTypeLegacy: + // The default is to derive the output filename from the Go import path + // if the file contains a go_package option,or from the input filename instead. + if _, importPath := goPackageOption(p); importPath != "" { + prefix = path.Join(string(importPath), path.Base(prefix)) + } + case pathTypeImport: + // If paths=import, the output filename is derived from the Go import path. 
+ prefix = path.Join(string(f.GoImportPath), path.Base(prefix)) + case pathTypeSourceRelative: + // If paths=source_relative, the output filename is derived from + // the input filename. + } + f.GoDescriptorIdent = GoIdent{ + GoName: "File_" + strs.GoSanitized(p.GetName()), + GoImportPath: f.GoImportPath, + } + f.GeneratedFilenamePrefix = prefix + + for _, loc := range p.GetSourceCodeInfo().GetLocation() { + // Descriptors declarations are guaranteed to have unique comment sets. + // Other locations may not be unique, but we don't use them. + var leadingDetached []Comments + for _, s := range loc.GetLeadingDetachedComments() { + leadingDetached = append(leadingDetached, Comments(s)) + } + f.comments[newPathKey(loc.Path)] = CommentSet{ + LeadingDetached: leadingDetached, + Leading: Comments(loc.GetLeadingComments()), + Trailing: Comments(loc.GetTrailingComments()), + } + } + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + f.Messages = append(f.Messages, newMessage(gen, f, nil, mds.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + f.Extensions = append(f.Extensions, newField(gen, f, nil, xds.Get(i))) + } + for i, sds := 0, desc.Services(); i < sds.Len(); i++ { + f.Services = append(f.Services, newService(gen, f, sds.Get(i))) + } + for _, message := range f.Messages { + if err := message.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, extension := range f.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, service := range f.Services { + for _, method := range service.Methods { + if err := method.resolveDependencies(gen); err != nil { + return nil, err + } + } + } + return f, nil +} + +func (f *File) location(idxPath ...int32) Location { + return Location{ + SourceFile: f.Desc.Path(), + Path: idxPath, + } +} + +// goPackageOption interprets a file's go_package option. +// If there is no go_package, it returns ("", ""). +// If there's a simple name, it returns (pkg, ""). +// If the option implies an import path, it returns (pkg, impPath). +func goPackageOption(d *descriptorpb.FileDescriptorProto) (pkg GoPackageName, impPath GoImportPath) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "" + } + rawPkg, impPath := goPackageOptionRaw(opt) + pkg = cleanPackageName(rawPkg) + if string(pkg) != rawPkg && impPath != "" { + warn("Malformed 'go_package' option in %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will reject this.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", d.GetName(), string(impPath)+";"+string(pkg)) + } + return pkg, impPath +} +func goPackageOptionRaw(opt string) (rawPkg string, impPath GoImportPath) { + // A semicolon-delimited suffix delimits the import path and package name. + if i := strings.Index(opt, ";"); i >= 0 { + return opt[i+1:], GoImportPath(opt[:i]) + } + // The presence of a slash implies there's an import path. + if i := strings.LastIndex(opt, "/"); i >= 0 { + return opt[i+1:], GoImportPath(opt) + } + return opt, "" +} + +// An Enum describes an enum. 
+type Enum struct { + Desc protoreflect.EnumDescriptor + + GoIdent GoIdent // name of the generated Go type + + Values []*EnumValue // enum value declarations + + Location Location // location of this enum + Comments CommentSet // comments associated with this enum +} + +func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(int32(genid.DescriptorProto_EnumType_field_number), int32(desc.Index())) + } else { + loc = f.location(int32(genid.FileDescriptorProto_EnumType_field_number), int32(desc.Index())) + } + enum := &Enum{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + gen.enumsByName[desc.FullName()] = enum + for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ { + enum.Values = append(enum.Values, newEnumValue(gen, f, parent, enum, vds.Get(i))) + } + return enum +} + +// An EnumValue describes an enum value. +type EnumValue struct { + Desc protoreflect.EnumValueDescriptor + + GoIdent GoIdent // name of the generated Go declaration + + Parent *Enum // enum in which this value is declared + + Location Location // location of this enum value + Comments CommentSet // comments associated with this enum value +} + +func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc protoreflect.EnumValueDescriptor) *EnumValue { + // A top-level enum value's name is: EnumName_ValueName + // An enum value contained in a message is: MessageName_ValueName + // + // For historical reasons, enum value names are not camel-cased. + parentIdent := enum.GoIdent + if message != nil { + parentIdent = message.GoIdent + } + name := parentIdent.GoName + "_" + string(desc.Name()) + loc := enum.Location.appendPath(int32(genid.EnumDescriptorProto_Value_field_number), int32(desc.Index())) + return &EnumValue{ + Desc: desc, + GoIdent: f.GoImportPath.Ident(name), + Parent: enum, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } +} + +// A Message describes a message. 
+type Message struct { + Desc protoreflect.MessageDescriptor + + GoIdent GoIdent // name of the generated Go type + + Fields []*Field // message field declarations + Oneofs []*Oneof // message oneof declarations + + Enums []*Enum // nested enum declarations + Messages []*Message // nested message declarations + Extensions []*Extension // nested extension declarations + + Location Location // location of this message + Comments CommentSet // comments associated with this message +} + +func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(int32(genid.DescriptorProto_NestedType_field_number), int32(desc.Index())) + } else { + loc = f.location(int32(genid.FileDescriptorProto_MessageType_field_number), int32(desc.Index())) + } + message := &Message{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + gen.messagesByName[desc.FullName()] = message + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + message.Enums = append(message.Enums, newEnum(gen, f, message, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + message.Messages = append(message.Messages, newMessage(gen, f, message, mds.Get(i))) + } + for i, fds := 0, desc.Fields(); i < fds.Len(); i++ { + message.Fields = append(message.Fields, newField(gen, f, message, fds.Get(i))) + } + for i, ods := 0, desc.Oneofs(); i < ods.Len(); i++ { + message.Oneofs = append(message.Oneofs, newOneof(gen, f, message, ods.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + message.Extensions = append(message.Extensions, newField(gen, f, message, xds.Get(i))) + } + + // Resolve local references between fields and oneofs. + for _, field := range message.Fields { + if od := field.Desc.ContainingOneof(); od != nil { + oneof := message.Oneofs[od.Index()] + field.Oneof = oneof + oneof.Fields = append(oneof.Fields, field) + } + } + + // Field name conflict resolution. + // + // We assume well-known method names that may be attached to a generated + // message type, as well as a 'Get*' method for each field. For each + // field in turn, we add _s to its name until there are no conflicts. + // + // Any change to the following set of method names is a potential + // incompatible API change because it may change generated field names. + // + // TODO: If we ever support a 'go_name' option to set the Go name of a + // field, we should consider dropping this entirely. The conflict + // resolution algorithm is subtle and surprising (changing the order + // in which fields appear in the .proto source file can change the + // names of fields in generated code), and does not adapt well to + // adding new per-field methods such as setters. + usedNames := map[string]bool{ + "Reset": true, + "String": true, + "ProtoMessage": true, + "Marshal": true, + "Unmarshal": true, + "ExtensionRangeArray": true, + "ExtensionMap": true, + "Descriptor": true, + } + makeNameUnique := func(name string, hasGetter bool) string { + for usedNames[name] || (hasGetter && usedNames["Get"+name]) { + name += "_" + } + usedNames[name] = true + usedNames["Get"+name] = hasGetter + return name + } + for _, field := range message.Fields { + field.GoName = makeNameUnique(field.GoName, true) + field.GoIdent.GoName = message.GoIdent.GoName + "_" + field.GoName + if field.Oneof != nil && field.Oneof.Fields[0] == field { + // Make the name for a oneof unique as well. 
For historical reasons, + // this assumes that a getter method is not generated for oneofs. + // This is incorrect, but fixing it breaks existing code. + field.Oneof.GoName = makeNameUnique(field.Oneof.GoName, false) + field.Oneof.GoIdent.GoName = message.GoIdent.GoName + "_" + field.Oneof.GoName + } + } + + // Oneof field name conflict resolution. + // + // This conflict resolution is incomplete as it does not consider collisions + // with other oneof field types, but fixing it breaks existing code. + for _, field := range message.Fields { + if field.Oneof != nil { + Loop: + for { + for _, nestedMessage := range message.Messages { + if nestedMessage.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + for _, nestedEnum := range message.Enums { + if nestedEnum.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + break Loop + } + } + } + + return message +} + +func (message *Message) resolveDependencies(gen *Plugin) error { + for _, field := range message.Fields { + if err := field.resolveDependencies(gen); err != nil { + return err + } + } + for _, message := range message.Messages { + if err := message.resolveDependencies(gen); err != nil { + return err + } + } + for _, extension := range message.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return err + } + } + return nil +} + +// A Field describes a message field. +type Field struct { + Desc protoreflect.FieldDescriptor + + // GoName is the base name of this field's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "FieldName" + + // GoIdent is the base name of a top-level declaration for this field. + // For code generated by protoc-gen-go, this means a wrapper type named + // '{{GoIdent}}' for members fields of a oneof, and a variable named + // 'E_{{GoIdent}}' for extension fields. 
+ GoIdent GoIdent // e.g., "MessageName_FieldName" + + Parent *Message // message in which this field is declared; nil if top-level extension + Oneof *Oneof // containing oneof; nil if not part of a oneof + Extendee *Message // extended message for extension fields; nil otherwise + + Enum *Enum // type for enum fields; nil otherwise + Message *Message // type for message or group fields; nil otherwise + + Location Location // location of this field + Comments CommentSet // comments associated with this field +} + +func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDescriptor) *Field { + var loc Location + switch { + case desc.IsExtension() && message == nil: + loc = f.location(int32(genid.FileDescriptorProto_Extension_field_number), int32(desc.Index())) + case desc.IsExtension() && message != nil: + loc = message.Location.appendPath(int32(genid.DescriptorProto_Extension_field_number), int32(desc.Index())) + default: + loc = message.Location.appendPath(int32(genid.DescriptorProto_Field_field_number), int32(desc.Index())) + } + camelCased := strs.GoCamelCase(string(desc.Name())) + var parentPrefix string + if message != nil { + parentPrefix = message.GoIdent.GoName + "_" + } + field := &Field{ + Desc: desc, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Parent: message, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + return field +} + +func (field *Field) resolveDependencies(gen *Plugin) error { + desc := field.Desc + switch desc.Kind() { + case protoreflect.EnumKind: + name := field.Desc.Enum().FullName() + enum, ok := gen.enumsByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for enum %v", desc.FullName(), name) + } + field.Enum = enum + case protoreflect.MessageKind, protoreflect.GroupKind: + name := desc.Message().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Message = message + } + if desc.IsExtension() { + name := desc.ContainingMessage().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Extendee = message + } + return nil +} + +// A Oneof describes a message oneof. +type Oneof struct { + Desc protoreflect.OneofDescriptor + + // GoName is the base name of this oneof's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "OneofName" + + // GoIdent is the base name of a top-level declaration for this oneof. 
+ GoIdent GoIdent // e.g., "MessageName_OneofName" + + Parent *Message // message in which this oneof is declared + + Fields []*Field // fields that are part of this oneof + + Location Location // location of this oneof + Comments CommentSet // comments associated with this oneof +} + +func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof { + loc := message.Location.appendPath(int32(genid.DescriptorProto_OneofDecl_field_number), int32(desc.Index())) + camelCased := strs.GoCamelCase(string(desc.Name())) + parentPrefix := message.GoIdent.GoName + "_" + return &Oneof{ + Desc: desc, + Parent: message, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } +} + +// Extension is an alias of Field for documentation. +type Extension = Field + +// A Service describes a service. +type Service struct { + Desc protoreflect.ServiceDescriptor + + GoName string + + Methods []*Method // service method declarations + + Location Location // location of this service + Comments CommentSet // comments associated with this service +} + +func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service { + loc := f.location(int32(genid.FileDescriptorProto_Service_field_number), int32(desc.Index())) + service := &Service{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + for i, mds := 0, desc.Methods(); i < mds.Len(); i++ { + service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i))) + } + return service +} + +// A Method describes a method in a service. +type Method struct { + Desc protoreflect.MethodDescriptor + + GoName string + + Parent *Service // service in which this method is declared + + Input *Message + Output *Message + + Location Location // location of this method + Comments CommentSet // comments associated with this method +} + +func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method { + loc := service.Location.appendPath(int32(genid.ServiceDescriptorProto_Method_field_number), int32(desc.Index())) + method := &Method{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Parent: service, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + return method +} + +func (method *Method) resolveDependencies(gen *Plugin) error { + desc := method.Desc + + inName := desc.Input().FullName() + in, ok := gen.messagesByName[inName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), inName) + } + method.Input = in + + outName := desc.Output().FullName() + out, ok := gen.messagesByName[outName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), outName) + } + method.Output = out + + return nil +} + +// A GeneratedFile is a generated file. +type GeneratedFile struct { + gen *Plugin + skip bool + filename string + goImportPath GoImportPath + buf bytes.Buffer + packageNames map[GoImportPath]GoPackageName + usedPackageNames map[GoPackageName]bool + manualImports map[GoImportPath]bool + annotations map[string][]Location +} + +// NewGeneratedFile creates a new generated file with the given filename +// and import path. 
+func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) *GeneratedFile { + g := &GeneratedFile{ + gen: gen, + filename: filename, + goImportPath: goImportPath, + packageNames: make(map[GoImportPath]GoPackageName), + usedPackageNames: make(map[GoPackageName]bool), + manualImports: make(map[GoImportPath]bool), + annotations: make(map[string][]Location), + } + + // All predeclared identifiers in Go are already used. + for _, s := range types.Universe.Names() { + g.usedPackageNames[GoPackageName(s)] = true + } + + gen.genFiles = append(gen.genFiles, g) + return g +} + +// P prints a line to the generated output. It converts each parameter to a +// string following the same rules as fmt.Print. It never inserts spaces +// between parameters. +func (g *GeneratedFile) P(v ...interface{}) { + for _, x := range v { + switch x := x.(type) { + case GoIdent: + fmt.Fprint(&g.buf, g.QualifiedGoIdent(x)) + default: + fmt.Fprint(&g.buf, x) + } + } + fmt.Fprintln(&g.buf) +} + +// QualifiedGoIdent returns the string to use for a Go identifier. +// +// If the identifier is from a different Go package than the generated file, +// the returned name will be qualified (package.name) and an import statement +// for the identifier's package will be included in the file. +func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string { + if ident.GoImportPath == g.goImportPath { + return ident.GoName + } + if packageName, ok := g.packageNames[ident.GoImportPath]; ok { + return string(packageName) + "." + ident.GoName + } + packageName := cleanPackageName(baseName(string(ident.GoImportPath))) + for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ { + packageName = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[ident.GoImportPath] = packageName + g.usedPackageNames[packageName] = true + return string(packageName) + "." + ident.GoName +} + +// Import ensures a package is imported by the generated file. +// +// Packages referenced by QualifiedGoIdent are automatically imported. +// Explicitly importing a package with Import is generally only necessary +// when the import will be blank (import _ "package"). +func (g *GeneratedFile) Import(importPath GoImportPath) { + g.manualImports[importPath] = true +} + +// Write implements io.Writer. +func (g *GeneratedFile) Write(p []byte) (n int, err error) { + return g.buf.Write(p) +} + +// Skip removes the generated file from the plugin output. +func (g *GeneratedFile) Skip() { + g.skip = true +} + +// Unskip reverts a previous call to Skip, re-including the generated file in +// the plugin output. +func (g *GeneratedFile) Unskip() { + g.skip = false +} + +// Annotate associates a symbol in a generated Go file with a location in a +// source .proto file. +// +// The symbol may refer to a type, constant, variable, function, method, or +// struct field. The "T.sel" syntax is used to identify the method or field +// 'sel' on type 'T'. +func (g *GeneratedFile) Annotate(symbol string, loc Location) { + g.annotations[symbol] = append(g.annotations[symbol], loc) +} + +// Content returns the contents of the generated file. +func (g *GeneratedFile) Content() ([]byte, error) { + if !strings.HasSuffix(g.filename, ".go") { + return g.buf.Bytes(), nil + } + + // Reformat generated code. + original := g.buf.Bytes() + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. 
+ // This should never happen in practice, but it can while changing generated code + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + return nil, fmt.Errorf("%v: unparsable Go source: %v\n%v", g.filename, err, src.String()) + } + + // Collect a sorted list of all imports. + var importPaths [][2]string + rewriteImport := func(importPath string) string { + if f := g.gen.opts.ImportRewriteFunc; f != nil { + return string(f(GoImportPath(importPath))) + } + return importPath + } + for importPath := range g.packageNames { + pkgName := string(g.packageNames[GoImportPath(importPath)]) + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{pkgName, pkgPath}) + } + for importPath := range g.manualImports { + if _, ok := g.packageNames[importPath]; !ok { + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{"_", pkgPath}) + } + } + sort.Slice(importPaths, func(i, j int) bool { + return importPaths[i][1] < importPaths[j][1] + }) + + // Modify the AST to include a new import block. + if len(importPaths) > 0 { + // Insert block after package statement or + // possible comment attached to the end of the package statement. + pos := file.Package + tokFile := fset.File(file.Package) + pkgLine := tokFile.Line(file.Package) + for _, c := range file.Comments { + if tokFile.Line(c.Pos()) > pkgLine { + break + } + pos = c.End() + } + + // Construct the import block. + impDecl := &ast.GenDecl{ + Tok: token.IMPORT, + TokPos: pos, + Lparen: pos, + Rparen: pos, + } + for _, importPath := range importPaths { + impDecl.Specs = append(impDecl.Specs, &ast.ImportSpec{ + Name: &ast.Ident{ + Name: importPath[0], + NamePos: pos, + }, + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(importPath[1]), + ValuePos: pos, + }, + EndPos: pos, + }) + } + file.Decls = append([]ast.Decl{impDecl}, file.Decls...) + } + + var out bytes.Buffer + if err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(&out, fset, file); err != nil { + return nil, fmt.Errorf("%v: can not reformat Go source: %v", g.filename, err) + } + return out.Bytes(), nil +} + +// metaFile returns the contents of the file's metadata file, which is a +// text formatted string of the google.protobuf.GeneratedCodeInfo. 
+func (g *GeneratedFile) metaFile(content []byte) (string, error) { + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", content, 0) + if err != nil { + return "", err + } + info := &descriptorpb.GeneratedCodeInfo{} + + seenAnnotations := make(map[string]bool) + annotate := func(s string, ident *ast.Ident) { + seenAnnotations[s] = true + for _, loc := range g.annotations[s] { + info.Annotation = append(info.Annotation, &descriptorpb.GeneratedCodeInfo_Annotation{ + SourceFile: proto.String(loc.SourceFile), + Path: loc.Path, + Begin: proto.Int32(int32(fset.Position(ident.Pos()).Offset)), + End: proto.Int32(int32(fset.Position(ident.End()).Offset)), + }) + } + } + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + annotate(spec.Name.Name, spec.Name) + switch st := spec.Type.(type) { + case *ast.StructType: + for _, field := range st.Fields.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + case *ast.InterfaceType: + for _, field := range st.Methods.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + } + case *ast.ValueSpec: + for _, name := range spec.Names { + annotate(name.Name, name) + } + } + } + case *ast.FuncDecl: + if decl.Recv == nil { + annotate(decl.Name.Name, decl.Name) + } else { + recv := decl.Recv.List[0].Type + if s, ok := recv.(*ast.StarExpr); ok { + recv = s.X + } + if id, ok := recv.(*ast.Ident); ok { + annotate(id.Name+"."+decl.Name.Name, decl.Name) + } + } + } + } + for a := range g.annotations { + if !seenAnnotations[a] { + return "", fmt.Errorf("%v: no symbol matching annotation %q", g.filename, a) + } + } + + b, err := prototext.Marshal(info) + if err != nil { + return "", err + } + return string(b), nil +} + +// A GoIdent is a Go identifier, consisting of a name and import path. +// The name is a single identifier and may not be a dot-qualified selector. +type GoIdent struct { + GoName string + GoImportPath GoImportPath +} + +func (id GoIdent) String() string { return fmt.Sprintf("%q.%v", id.GoImportPath, id.GoName) } + +// newGoIdent returns the Go identifier for a descriptor. +func newGoIdent(f *File, d protoreflect.Descriptor) GoIdent { + name := strings.TrimPrefix(string(d.FullName()), string(f.Desc.Package())+".") + return GoIdent{ + GoName: strs.GoCamelCase(name), + GoImportPath: f.GoImportPath, + } +} + +// A GoImportPath is the import path of a Go package. +// For example: "google.golang.org/protobuf/compiler/protogen" +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// Ident returns a GoIdent with s as the GoName and p as the GoImportPath. +func (p GoImportPath) Ident(s string) GoIdent { + return GoIdent{GoName: s, GoImportPath: p} +} + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// cleanPackageName converts a string to a valid Go package name. +func cleanPackageName(name string) GoPackageName { + return GoPackageName(strs.GoSanitized(name)) +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. 
+func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[:i] + } + return name +} + +type pathType int + +const ( + pathTypeLegacy pathType = iota + pathTypeImport + pathTypeSourceRelative +) + +// A Location is a location in a .proto source file. +// +// See the google.protobuf.SourceCodeInfo documentation in descriptor.proto +// for details. +type Location struct { + SourceFile string + Path protoreflect.SourcePath +} + +// appendPath add elements to a Location's path, returning a new Location. +func (loc Location) appendPath(a ...int32) Location { + var n protoreflect.SourcePath + n = append(n, loc.Path...) + n = append(n, a...) + return Location{ + SourceFile: loc.SourceFile, + Path: n, + } +} + +// A pathKey is a representation of a location path suitable for use as a map key. +type pathKey struct { + s string +} + +// newPathKey converts a location path to a pathKey. +func newPathKey(idxPath []int32) pathKey { + buf := make([]byte, 4*len(idxPath)) + for i, x := range idxPath { + binary.LittleEndian.PutUint32(buf[i*4:], uint32(x)) + } + return pathKey{string(buf)} +} + +// CommentSet is a set of leading and trailing comments associated +// with a .proto descriptor declaration. +type CommentSet struct { + LeadingDetached []Comments + Leading Comments + Trailing Comments +} + +// Comments is a comments string as provided by protoc. +type Comments string + +// String formats the comments by inserting // to the start of each line, +// ensuring that there is a trailing newline. +// An empty comment is formatted as an empty string. +func (c Comments) String() string { + if c == "" { + return "" + } + var b []byte + for _, line := range strings.Split(strings.TrimSuffix(string(c), "\n"), "\n") { + b = append(b, "//"...) + b = append(b, line...) + b = append(b, "\n"...) + } + return string(b) +} + +var warnings = true + +func warn(format string, a ...interface{}) { + if warnings { + log.Printf("WARNING: "+format, a...) + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go new file mode 100644 index 00000000..cab95a42 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -0,0 +1,791 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable textproto format unmarshaler. 
+type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return error if there are any missing required fields. + AllowPartial bool + + // DiscardUnknown specifies whether to ignore unknown fields when parsing. + // An unknown field is any field whose field name or field number does not + // resolve to any known or extension field in the message. + // By default, unmarshal rejects unknown fields as an error. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message using options in +// UnmarshalOptions object. +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{text.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *text.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok text.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if messageDesc.FullName() == genid.Any_message_fullname { + return d.unmarshalAny(m, checkDelims) + } + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch typ := tok.Kind(); typ { + case text.Name: + // Continue below. 
+ case text.EOF: + if checkDelims { + return text.ErrUnexpectedEOF + } + return nil + default: + if checkDelims && typ == text.MessageClose { + return nil + } + return d.unexpectedTokenError(tok) + } + + // Resolve the field descriptor. + var name pref.Name + var fd pref.FieldDescriptor + var xt pref.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: + name = pref.Name(tok.IdentName()) + fd = fieldDescs.ByName(name) + if fd == nil { + // The proto name of a group field is in all lowercase, + // while the textproto field name is the group message name. + gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { + fd = gd + } + } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { + fd = nil // reset since field name is actually the message name + } + + case text.TypeName: + // Handle extensions only. This code path is not for Any. + xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true + num := pref.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } + fd = fieldDescs.ByNumber(num) + if fd == nil { + xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) + } + } + + if xt != nil { + fd = xt.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } else if xtErr != nil && xtErr != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + // Handle unknown fields. + if fd == nil { + if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { + d.skipValue() + continue + } + return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) + } + + // Handle fields identified by field number. + if isFieldNumberName { + // TODO: Add an option to permit parsing field numbers. + // + // This requires careful thought as the MarshalOptions.EmitUnknown + // option allows formatting unknown fields as the field number and the + // best-effort textual representation of the field value. In that case, + // it may not be possible to unmarshal the value from a parser that does + // have information about the unknown field. + return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) + } + + switch { + case fd.IsList(): + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + list := m.Mutable(fd).List() + if err := d.unmarshalList(fd, list); err != nil { + return err + } + + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(fd, mmap); err != nil { + return err + } + + default: + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + // If field is a oneof, check if it has already been set. 
+ if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) + } + + if err := d.unmarshalSingular(fd, m); err != nil { + return err + } + seenNums.Set(num) + } + } + + return nil +} + +// findExtension returns protoreflect.ExtensionType from the Resolver if found. +func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { + xt, err := d.opts.Resolver.FindExtensionByName(xtName) + if err == nil { + return xt, nil + } + return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) +} + +// unmarshalSingular unmarshals a non-repeated field value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: + val, err = d.unmarshalScalar(fd) + } + if err == nil { + m.Set(fd, val) + } + return err +} + +// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + if tok.Kind() != text.Scalar { + return pref.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if b, ok := tok.Bool(); ok { + return pref.ValueOfBool(b), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, ok := tok.Int32(); ok { + return pref.ValueOfInt32(n), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, ok := tok.Int64(); ok { + return pref.ValueOfInt64(n), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, ok := tok.Uint32(); ok { + return pref.ValueOfUint32(n), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, ok := tok.Uint64(); ok { + return pref.ValueOfUint64(n), nil + } + + case pref.FloatKind: + if n, ok := tok.Float32(); ok { + return pref.ValueOfFloat32(n), nil + } + + case pref.DoubleKind: + if n, ok := tok.Float64(); ok { + return pref.ValueOfFloat64(n), nil + } + + case pref.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } + return pref.ValueOfString(s), nil + } + + case pref.BytesKind: + if b, ok := tok.String(); ok { + return pref.ValueOfBytes([]byte(b)), nil + } + + case pref.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. + if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { + return pref.ValueOfEnum(pref.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +// unmarshalList unmarshals into given protoreflect.List. A list value can +// either be in [] syntax or simply just a single scalar/message value. 
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + return nil + } + + default: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + return nil + } + } + + return d.unexpectedTokenError(tok) +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return pref.Value{}, err + } + return pval, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageOpen: + return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { + return err + } + default: + return d.unexpectedTokenError(tok) + } + } + + default: + return d.unexpectedTokenError(tok) + } +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { + var key pref.MapKey + var pval pref.Value +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.Name: + if tok.NameKind() != text.IdentName { + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) + } + d.skipValue() + continue Loop + } + // Continue below. 
+ case text.MessageClose: + break Loop + default: + return d.unexpectedTokenError(tok) + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + if key.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + val, err := d.unmarshalScalar(fd.MapKey()) + if err != nil { + return err + } + key = val.MapKey() + + case genid.MapEntry_Value_field_name: + if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + } + if pval.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + pval, err = unmarshalMapValue() + if err != nil { + return err + } + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", name) + } + d.skipValue() + } + } + + if !key.IsValid() { + key = fd.MapKey().Default().MapKey() + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() + default: + pval = fd.MapValue().Default() + } + } + mmap.Set(key, pval) + return nil +} + +// unmarshalAny unmarshals an Any textproto. It can either be in expanded form +// or non-expanded form. +func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + var seenTypeUrl bool + var seenValue bool + var isExpanded bool + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + +Loop: + for { + // Read field name. Can only have 3 possible field names, i.e. type_url, + // value and type URL name inside []. + tok, err := d.Read() + if err != nil { + return err + } + if typ := tok.Kind(); typ != text.Name { + if checkDelims { + if typ == text.MessageClose { + break Loop + } + } else if typ == text.EOF { + break Loop + } + return d.unexpectedTokenError(tok) + } + + switch tok.NameKind() { + case text.IdentName: + // Both type_url and value fields require field separator :. 
+ if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + var ok bool + typeURL, ok = tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) + } + seenTypeUrl = true + + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + s, ok := tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) + } + bValue = []byte(s) + seenValue = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + + case text.TypeName: + if isExpanded { + return d.newError(tok.Pos(), "cannot have more than one type") + } + if seenTypeUrl { + return d.newError(tok.Pos(), "conflict with type_url field") + } + typeURL = tok.TypeName() + var err error + bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) + if err != nil { + return err + } + isExpanded = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + } + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + } + if len(bValue) > 0 { + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + } + return nil +} + +func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { + mt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) + } + // Create new message for the embedded message type and unmarshal the value + // field into it. + m := mt.New() + if err := d.unmarshalMessage(m, true); err != nil { + return nil, err + } + // Serialize the embedded message and return the resulting bytes. + b, err := proto.MarshalOptions{ + AllowPartial: true, // Never check required fields inside an Any. + Deterministic: true, + }.Marshal(m.Interface()) + if err != nil { + return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) + } + return b, nil +} + +// skipValue makes the decoder parse a field value in order to advance the read +// to the next field. It relies on Read returning an error if the types are not +// in valid sequence. +func (d decoder) skipValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for messages and lists. + switch tok.Kind() { + case text.MessageOpen: + return d.skipMessageValue() + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + return d.skipMessageValue() + default: + // Skip items. 
This will not validate whether skipped values are + // of the same type or not, same behavior as C++ + // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. + if err := d.skipValue(); err != nil { + return err + } + } + } + } + return nil +} + +// skipMessageValue makes the decoder parse and skip over all fields in a +// message. It assumes that the previous read type is MessageOpen. +func (d decoder) skipMessageValue() error { + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageClose: + return nil + case text.Name: + if err := d.skipValue(); err != nil { + return err + } + } + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go new file mode 100644 index 00000000..162b4f98 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package prototext marshals and unmarshals protocol buffer messages as the +// textproto format. +package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go new file mode 100644 index 00000000..0877d71c --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -0,0 +1,433 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "sort" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in textproto format using default +// options. Do not depend on the output being stable. It may change over time +// across different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable text format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. 
If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // EmitASCII specifies whether to format strings and bytes as ASCII only + // as opposed to using UTF-8 encoding when possible. + EmitASCII bool + + // allowInvalidUTF8 specifies whether to permit the encoding of strings + // with invalid UTF-8. This is unexported as it is intended to only + // be specified by the Format method. + allowInvalidUTF8 bool + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // EmitUnknown specifies whether to emit unknown fields in the output. + // If specified, the unmarshaler may be unable to parse the output. + // The default is to exclude unknown fields. + EmitUnknown bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.allowInvalidUTF8 = true + o.AllowPartial = true + o.EmitUnknown = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal writes the given proto.Message in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. It may +// change over time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + var delims = [2]byte{'{', '}'} + + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case there is nothing to output. + if m == nil { + return []byte{}, nil + } + + enc := encoder{internalEnc, o} + err = enc.marshalMessage(m.ProtoReflect(), false) + if err != nil { + return nil, err + } + out := enc.Bytes() + if len(o.Indent) > 0 && len(out) > 0 { + out = append(out, '\n') + } + if o.AllowPartial { + return out, nil + } + return out, proto.CheckInitialized(m) +} + +type encoder struct { + *text.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if inclDelims { + e.StartMessage() + defer e.EndMessage() + } + + // Handle Any expansion. 
+ if messageDesc.FullName() == genid.Any_message_fullname { + if e.marshalAny(m) { + return nil + } + // If unable to expand, continue on to marshal Any as a regular message. + } + + // Marshal known fields. + fieldDescs := messageDesc.Fields() + size := fieldDescs.Len() + for i := 0; i < size; { + fd := fieldDescs.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + + if fd == nil || !m.Has(fd) { + continue + } + + name := fd.Name() + // Use type name for group field name. + if fd.Kind() == pref.GroupKind { + name = fd.Message().Name() + } + val := m.Get(fd) + if err := e.marshalField(string(name), val, fd); err != nil { + return err + } + } + + // Marshal extensions. + if err := e.marshalExtensions(m); err != nil { + return err + } + + // Marshal unknown fields. + if e.opts.EmitUnknown { + e.marshalUnknown(m.GetUnknown()) + } + + return nil +} + +// marshalField marshals the given field with protoreflect.Value. +func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) + case fd.IsMap(): + return e.marshalMap(name, val.Map(), fd) + default: + e.WriteName(name) + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + kind := fd.Kind() + switch kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + + case pref.Int32Kind, pref.Int64Kind, + pref.Sint32Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + e.WriteUint(val.Uint()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(string(val.Bytes())) + + case pref.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) + } else { + // Use numeric value if there is no enum description. + e.WriteInt(int64(num)) + } + + case pref.MessageKind, pref.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List as multiple name-value fields. +func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) + if err := e.marshalSingular(list.Get(i), fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals the given protoreflect.Map as multiple name-value fields. 
+func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { + var err error + mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() + + e.WriteName(string(genid.MapEntry_Key_field_name)) + err = e.marshalSingular(key.Value(), fd.MapKey()) + if err != nil { + return false + } + + e.WriteName(string(genid.MapEntry_Value_field_name)) + err = e.marshalSingular(val, fd.MapValue()) + if err != nil { + return false + } + return true + }) + return err +} + +// marshalExtensions marshals extension fields. +func (e encoder) marshalExtensions(m pref.Message) error { + type entry struct { + key string + value pref.Value + desc pref.FieldDescriptor + } + + // Get a sorted list based on field key first. + var entries []entry + m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + if !fd.IsExtension() { + return true + } + // For MessageSet extensions, the name used is the parent message. + name := fd.FullName() + if messageset.IsMessageSetExtension(fd) { + name = name.Parent() + } + entries = append(entries, entry{ + key: string(name), + value: v, + desc: fd, + }) + return true + }) + // Sort extensions lexicographically. + sort.Slice(entries, func(i, j int) bool { + return entries[i].key < entries[j].key + }) + + // Write out sorted list. + for _, entry := range entries { + // Extension field name is the proto field name enclosed in []. + name := "[" + entry.key + "]" + if err := e.marshalField(name, entry.value, entry.desc); err != nil { + return err + } + } + return nil +} + +// marshalUnknown parses the given []byte and marshals fields out. +// This function assumes proper encoding in the given []byte. +func (e encoder) marshalUnknown(b []byte) { + const dec = 10 + const hex = 16 + for len(b) > 0 { + num, wtype, n := protowire.ConsumeTag(b) + b = b[n:] + e.WriteName(strconv.FormatInt(int64(num), dec)) + + switch wtype { + case protowire.VarintType: + var v uint64 + v, n = protowire.ConsumeVarint(b) + e.WriteUint(v) + case protowire.Fixed32Type: + var v uint32 + v, n = protowire.ConsumeFixed32(b) + e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) + case protowire.Fixed64Type: + var v uint64 + v, n = protowire.ConsumeFixed64(b) + e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) + case protowire.BytesType: + var v []byte + v, n = protowire.ConsumeBytes(b) + e.WriteString(string(v)) + case protowire.StartGroupType: + e.StartMessage() + var v []byte + v, n = protowire.ConsumeGroup(num, b) + e.marshalUnknown(v) + e.EndMessage() + default: + panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) + } + + b = b[n:] + } +} + +// marshalAny marshals the given google.protobuf.Any message in expanded form. +// It returns true if it was able to marshal, else false. +func (e encoder) marshalAny(any pref.Message) bool { + // Construct the embedded message. + fds := any.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + typeURL := any.Get(fdType).String() + mt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return false + } + m := mt.New().Interface() + + // Unmarshal bytes into embedded message. + fdValue := fds.ByNumber(genid.Any_Value_field_number) + value := any.Get(fdValue) + err = proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: e.opts.Resolver, + }.Unmarshal(value.Bytes(), m) + if err != nil { + return false + } + + // Get current encoder position. 
If marshaling fails, reset encoder output + // back to this position. + pos := e.Snapshot() + + // Field name is the proto field name enclosed in []. + e.WriteName("[" + typeURL + "]") + err = e.marshalMessage(m.ProtoReflect(), true) + if err != nil { + e.Reset(pos) + return false + } + return true +} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go new file mode 100644 index 00000000..a427f8b7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -0,0 +1,538 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protowire parses and formats the raw wire encoding. +// See https://developers.google.com/protocol-buffers/docs/encoding. +// +// For marshaling and unmarshaling entire protobuf messages, +// use the "google.golang.org/protobuf/proto" package instead. +package protowire + +import ( + "io" + "math" + "math/bits" + + "google.golang.org/protobuf/internal/errors" +) + +// Number represents the field number. +type Number int32 + +const ( + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 +) + +// IsValid reports whether the field number is semantically valid. +// +// Note that while numbers within the reserved range are semantically invalid, +// they are syntactically valid in the wire format. +// Implementations may treat records with reserved field numbers as unknown. +func (n Number) IsValid() bool { + return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber +} + +// Type represents the wire type. +type Type int8 + +const ( + VarintType Type = 0 + Fixed32Type Type = 5 + Fixed64Type Type = 1 + BytesType Type = 2 + StartGroupType Type = 3 + EndGroupType Type = 4 +) + +const ( + _ = -iota + errCodeTruncated + errCodeFieldNumber + errCodeOverflow + errCodeReserved + errCodeEndGroup +) + +var ( + errFieldNumber = errors.New("invalid field number") + errOverflow = errors.New("variable length integer overflow") + errReserved = errors.New("cannot parse reserved wire type") + errEndGroup = errors.New("mismatching end group marker") + errParse = errors.New("parse error") +) + +// ParseError converts an error code into an error value. +// This returns nil if n is a non-negative number. +func ParseError(n int) error { + if n >= 0 { + return nil + } + switch n { + case errCodeTruncated: + return io.ErrUnexpectedEOF + case errCodeFieldNumber: + return errFieldNumber + case errCodeOverflow: + return errOverflow + case errCodeReserved: + return errReserved + case errCodeEndGroup: + return errEndGroup + default: + return errParse + } +} + +// ConsumeField parses an entire field record (both tag and value) and returns +// the field number, the wire type, and the total length. +// This returns a negative length upon an error (see ParseError). +// +// The total length includes the tag header and the end group marker (if the +// field is a group). +func ConsumeField(b []byte) (Number, Type, int) { + num, typ, n := ConsumeTag(b) + if n < 0 { + return 0, 0, n // forward error code + } + m := ConsumeFieldValue(num, typ, b[n:]) + if m < 0 { + return 0, 0, m // forward error code + } + return num, typ, n + m +} + +// ConsumeFieldValue parses a field value and returns its length. 
+// This assumes that the field Number and wire Type have already been parsed. +// This returns a negative length upon an error (see ParseError). +// +// When parsing a group, the length includes the end group marker and +// the end group is verified to match the starting field number. +func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + switch typ { + case VarintType: + _, n = ConsumeVarint(b) + return n + case Fixed32Type: + _, n = ConsumeFixed32(b) + return n + case Fixed64Type: + _, n = ConsumeFixed64(b) + return n + case BytesType: + _, n = ConsumeBytes(b) + return n + case StartGroupType: + n0 := len(b) + for { + num2, typ2, n := ConsumeTag(b) + if n < 0 { + return n // forward error code + } + b = b[n:] + if typ2 == EndGroupType { + if num != num2 { + return errCodeEndGroup + } + return n0 - len(b) + } + + n = ConsumeFieldValue(num2, typ2, b) + if n < 0 { + return n // forward error code + } + b = b[n:] + } + case EndGroupType: + return errCodeEndGroup + default: + return errCodeReserved + } +} + +// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. +func AppendTag(b []byte, num Number, typ Type) []byte { + return AppendVarint(b, EncodeTag(num, typ)) +} + +// ConsumeTag parses b as a varint-encoded tag, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeTag(b []byte) (Number, Type, int) { + v, n := ConsumeVarint(b) + if n < 0 { + return 0, 0, n // forward error code + } + num, typ := DecodeTag(v) + if num < MinValidNumber { + return 0, 0, errCodeFieldNumber + } + return num, typ, n +} + +func SizeTag(num Number) int { + return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size +} + +// AppendVarint appends v to b as a varint-encoded uint64. 
+func AppendVarint(b []byte, v uint64) []byte { + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeVarint(b []byte) (v uint64, n int) { + var y uint64 + if len(b) <= 0 { + return 0, errCodeTruncated + } + v = uint64(b[0]) + if v < 0x80 { + return v, 1 + } + v -= 0x80 + + if len(b) <= 1 { + return 0, errCodeTruncated + } + y = uint64(b[1]) + v += y << 7 + if y < 0x80 { + return v, 2 + } + v -= 0x80 << 7 + + if len(b) <= 2 { + return 0, errCodeTruncated + } + y = uint64(b[2]) + v += y << 14 + if y < 0x80 { + return v, 3 + } + v -= 0x80 << 14 + + if len(b) <= 3 { + return 0, errCodeTruncated + } + y = uint64(b[3]) + v += y << 21 + if y < 0x80 { + return v, 4 + } + v -= 0x80 << 21 + + if len(b) <= 4 { + return 0, errCodeTruncated + } + y = uint64(b[4]) + v += y << 28 + if y < 0x80 { + return v, 5 + } + v -= 0x80 << 28 + + if len(b) <= 5 { + return 0, errCodeTruncated + } + y = uint64(b[5]) + v += y << 35 + if y < 0x80 { + return v, 6 + } + v -= 0x80 << 35 + + if len(b) <= 6 { + return 0, errCodeTruncated + } + y = uint64(b[6]) + v += y << 42 + if y < 0x80 { + return v, 7 + } + v -= 0x80 << 42 + + if len(b) <= 7 { + return 0, errCodeTruncated + } + y = uint64(b[7]) + v += y << 49 + if y < 0x80 { + return v, 8 + } + v -= 0x80 << 49 + + if len(b) <= 8 { + return 0, errCodeTruncated + } + y = uint64(b[8]) + v += y << 56 + if y < 0x80 { + return v, 9 + } + v -= 0x80 << 56 + + if len(b) <= 9 { + return 0, errCodeTruncated + } + y = uint64(b[9]) + v += y << 63 + if y < 2 { + return v, 10 + } + return 0, errCodeOverflow +} + +// SizeVarint returns the encoded size of a varint. +// The size is guaranteed to be within 1 and 10, inclusive. +func SizeVarint(v uint64) int { + // This computes 1 + (bits.Len64(v)-1)/7. 
+ // 9/64 is a good enough approximation of 1/7 + return int(9*uint32(bits.Len64(v))+64) / 64 +} + +// AppendFixed32 appends v to b as a little-endian uint32. +func AppendFixed32(b []byte, v uint32) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24)) +} + +// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed32(b []byte) (v uint32, n int) { + if len(b) < 4 { + return 0, errCodeTruncated + } + v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return v, 4 +} + +// SizeFixed32 returns the encoded size of a fixed32; which is always 4. +func SizeFixed32() int { + return 4 +} + +// AppendFixed64 appends v to b as a little-endian uint64. +func AppendFixed64(b []byte, v uint64) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) +} + +// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed64(b []byte) (v uint64, n int) { + if len(b) < 8 { + return 0, errCodeTruncated + } + v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return v, 8 +} + +// SizeFixed64 returns the encoded size of a fixed64; which is always 8. +func SizeFixed64() int { + return 8 +} + +// AppendBytes appends v to b as a length-prefixed bytes value. +func AppendBytes(b []byte, v []byte) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeBytes(b []byte) (v []byte, n int) { + m, n := ConsumeVarint(b) + if n < 0 { + return nil, n // forward error code + } + if m > uint64(len(b[n:])) { + return nil, errCodeTruncated + } + return b[n:][:m], n + int(m) +} + +// SizeBytes returns the encoded size of a length-prefixed bytes value, +// given only the length. +func SizeBytes(n int) int { + return SizeVarint(uint64(n)) + n +} + +// AppendString appends v to b as a length-prefixed bytes value. +func AppendString(b []byte, v string) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeString parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeString(b []byte) (v string, n int) { + bb, n := ConsumeBytes(b) + return string(bb), n +} + +// AppendGroup appends v to b as group value, with a trailing end group marker. +// The value v must not contain the end marker. +func AppendGroup(b []byte, num Number, v []byte) []byte { + return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) +} + +// ConsumeGroup parses b as a group value until the trailing end group marker, +// and verifies that the end marker matches the provided num. The value v +// does not contain the end marker, while the length does contain the end marker. +// This returns a negative length upon an error (see ParseError). +func ConsumeGroup(num Number, b []byte) (v []byte, n int) { + n = ConsumeFieldValue(num, StartGroupType, b) + if n < 0 { + return nil, n // forward error code + } + b = b[:n] + + // Truncate off end group marker, but need to handle denormalized varints. 
+ // Assuming end marker is never 0 (which is always the case since + // EndGroupType is non-zero), we can truncate all trailing bytes where the + // lower 7 bits are all zero (implying that the varint is denormalized). + for len(b) > 0 && b[len(b)-1]&0x7f == 0 { + b = b[:len(b)-1] + } + b = b[:len(b)-SizeTag(num)] + return b, n +} + +// SizeGroup returns the encoded size of a group, given only the length. +func SizeGroup(num Number, n int) int { + return n + SizeTag(num) +} + +// DecodeTag decodes the field Number and wire Type from its unified form. +// The Number is -1 if the decoded field number overflows int32. +// Other than overflow, this does not check for field number validity. +func DecodeTag(x uint64) (Number, Type) { + // NOTE: MessageSet allows for larger field numbers than normal. + if x>>3 > uint64(math.MaxInt32) { + return -1, 0 + } + return Number(x >> 3), Type(x & 7) +} + +// EncodeTag encodes the field Number and wire Type into its unified form. +func EncodeTag(num Number, typ Type) uint64 { + return uint64(num)<<3 | uint64(typ&7) +} + +// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} +// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} +func DecodeZigZag(x uint64) int64 { + return int64(x>>1) ^ int64(x)<<63>>63 +} + +// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} +// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} +func EncodeZigZag(x int64) uint64 { + return uint64(x<<1) ^ uint64(x>>63) +} + +// DecodeBool decodes a uint64 as a bool. +// Input: { 0, 1, 2, …} +// Output: {false, true, true, …} +func DecodeBool(x uint64) bool { + return x != 0 +} + +// EncodeBool encodes a bool as a uint64. +// Input: {false, true} +// Output: { 0, 1} +func EncodeBool(x bool) uint64 { + if x { + return 1 + } + return 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go new file mode 100644 index 00000000..e7af0fe0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -0,0 +1,316 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descfmt provides functionality to format descriptors. 
+package descfmt + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type list interface { + Len() int + pragma.DoNotImplement +} + +func FormatList(s fmt.State, r rune, vs list) { + io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatListOpt(vs list, isRoot, allowMulti bool) string { + start, end := "[", "]" + if isRoot { + var name string + switch vs.(type) { + case pref.Names: + name = "Names" + case pref.FieldNumbers: + name = "FieldNumbers" + case pref.FieldRanges: + name = "FieldRanges" + case pref.EnumRanges: + name = "EnumRanges" + case pref.FileImports: + name = "FileImports" + case pref.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + } + start, end = name+"{", "}" + } + + var ss []string + switch vs := vs.(type) { + case pref.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive + } + } + return start + joinStrings(ss, false) + end + case pref.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive + } + } + return start + joinStrings(ss, false) + end + case pref.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + ss = append(ss, "{"+rs.Join()+"}") + } + return start + joinStrings(ss, allowMulti) + end + default: + _, isEnumValue := vs.(pref.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() + ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +} + +// descriptorAccessors is a list of accessors to print for each descriptor. +// +// Do not print all accessors since some contain redundant information, +// while others are pointers that we do not want to follow since the descriptor +// is actually a cyclic graph. +// +// Using a list allows us to print the accessors in a sensible order. 
+var descriptorAccessors = map[reflect.Type][]string{ + reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +} + +func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + + start, end := "{", "}" + if isRoot { + start = rt.Name() + "{" + } + + _, isFile := t.(pref.FileDescriptor) + rs := records{allowMulti: allowMulti} + if t.IsPlaceholder() { + if isFile { + rs.Append(rv, "Path", "Package", "IsPlaceholder") + } else { + rs.Append(rv, "FullName", "IsPlaceholder") + } + } else { + switch { + case isFile: + rs.Append(rv, "Syntax") + case isRoot: + rs.Append(rv, "Syntax", "FullName") + default: + rs.Append(rv, "Name") + } + switch t := t.(type) { + case pref.FieldDescriptor: + for _, s := range descriptorAccessors[rt] { + switch s { + case "MapKey": + if k := t.MapKey(); k != nil { + rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) + } + case "MapValue": + if v := t.MapValue(); v != nil { + switch v.Kind() { + case pref.EnumKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + case pref.MessageKind, pref.GroupKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + } + } + case "ContainingOneof": + if od := t.ContainingOneof(); od != nil { + rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + } + case "ContainingMessage": + if t.IsExtension() { + rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + } + case "Message": + if !t.IsMap() { + rs.Append(rv, s) + } + default: + rs.Append(rv, s) + } + } + case pref.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { + ss = append(ss, string(fs.Get(i).Name())) + } + if len(ss) > 0 { + rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + } + default: + rs.Append(rv, descriptorAccessors[rt]...) 
+ } + if rv.MethodByName("GoType").IsValid() { + rs.Append(rv, "GoType") + } + } + return start + rs.Join() + end +} + +type records struct { + recs [][2]string + allowMulti bool +} + +func (rs *records) Append(v reflect.Value, accessors ...string) { + for _, a := range accessors { + var rv reflect.Value + if m := v.MethodByName(a); m.IsValid() { + rv = m.Call(nil)[0] + } + if v.Kind() == reflect.Struct && !rv.IsValid() { + rv = v.FieldByName(a) + } + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + } + if _, ok := rv.Interface().(pref.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() + } + } + + // Ignore zero values. + var isZero bool + switch rv.Kind() { + case reflect.Interface, reflect.Slice: + isZero = rv.IsNil() + case reflect.Bool: + isZero = rv.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + isZero = rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + isZero = rv.Uint() == 0 + case reflect.String: + isZero = rv.String() == "" + } + if n, ok := rv.Interface().(list); ok { + isZero = n.Len() == 0 + } + if isZero { + continue + } + + // Format the value. + var s string + v := rv.Interface() + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) + case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: + s = string(v.(pref.Descriptor).Name()) + case pref.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) + case []byte: + s = fmt.Sprintf("%q", v) + default: + s = fmt.Sprint(v) + } + rs.recs = append(rs.recs, [2]string{a, s}) + } +} + +func (rs *records) Join() string { + var ss []string + + // In single line mode, simply join all records with commas. + if !rs.allowMulti { + for _, r := range rs.recs { + ss = append(ss, r[0]+formatColon(0)+r[1]) + } + return joinStrings(ss, false) + } + + // In allowMulti line mode, align single line records for more readable output. + var maxLen int + flush := func(i int) { + for _, r := range rs.recs[len(ss):i] { + ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) + } + maxLen = 0 + } + for i, r := range rs.recs { + if isMulti := strings.Contains(r[1], "\n"); isMulti { + flush(i) + ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) + } else if maxLen < len(r[0]) { + maxLen = len(r[0]) + } + } + flush(len(rs.recs)) + return joinStrings(ss, true) +} + +func formatColon(padding int) string { + // Deliberately introduce instability into the debug output to + // discourage users from performing string comparisons. + // This provides us flexibility to change the output in the future. + if detrand.Bool() { + return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) + } else { + return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) + } +} + +func joinStrings(ss []string, isMulti bool) string { + if len(ss) == 0 { + return "" + } + if isMulti { + return "\n\t" + strings.Join(ss, "\n\t") + "\n" + } + return strings.Join(ss, ", ") +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go new file mode 100644 index 00000000..8401be8c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descopts contains the nil pointers to concrete descriptor options. +// +// This package exists as a form of reverse dependency injection so that certain +// packages (e.g., internal/filedesc and internal/filetype can avoid a direct +// dependency on the descriptor proto package). +package descopts + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +// These variables are set by the init function in descriptor.pb.go via logic +// in internal/filetype. In other words, so long as the descriptor proto package +// is linked in, these variables will be populated. +// +// Each variable is populated with a nil pointer to the options struct. +var ( + File pref.ProtoMessage + Enum pref.ProtoMessage + EnumValue pref.ProtoMessage + Message pref.ProtoMessage + Field pref.ProtoMessage + Oneof pref.ProtoMessage + ExtensionRange pref.ProtoMessage + Service pref.ProtoMessage + Method pref.ProtoMessage +) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go new file mode 100644 index 00000000..a904dd1f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package detrand provides deterministically random functionality. +// +// The pseudo-randomness of these functions is seeded by the program binary +// itself and guarantees that the output does not change within a program, +// while ensuring that the output is unstable across different builds. +package detrand + +import ( + "encoding/binary" + "hash/fnv" + "os" +) + +// Disable disables detrand such that all functions returns the zero value. +// This function is not concurrent-safe and must be called during program init. +func Disable() { + randSeed = 0 +} + +// Bool returns a deterministically random boolean. +func Bool() bool { + return randSeed%2 == 1 +} + +// randSeed is a best-effort at an approximate hash of the Go binary. +var randSeed = binaryHash() + +func binaryHash() uint64 { + // Open the Go binary. + s, err := os.Executable() + if err != nil { + return 0 + } + f, err := os.Open(s) + if err != nil { + return 0 + } + defer f.Close() + + // Hash the size and several samples of the Go binary. + const numSamples = 8 + var buf [64]byte + h := fnv.New64() + fi, err := f.Stat() + if err != nil { + return 0 + } + binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) + h.Write(buf[:8]) + for i := int64(0); i < numSamples; i++ { + if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { + return 0 + } + h.Write(buf[:]) + } + return h.Sum64() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go new file mode 100644 index 00000000..fdd9b13f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defval marshals and unmarshals textual forms of default values. 
+// +// This package handles both the form historically used in Go struct field tags +// and also the form used by google.protobuf.FieldDescriptorProto.default_value +// since they differ in superficial ways. +package defval + +import ( + "fmt" + "math" + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" + errors "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Format is the serialization format used to represent the default value. +type Format int + +const ( + _ Format = iota + + // Descriptor uses the serialization format that protoc uses with the + // google.protobuf.FieldDescriptorProto.default_value field. + Descriptor + + // GoTag uses the historical serialization format in Go struct field tags. + GoTag +) + +// Unmarshal deserializes the default string s according to the given kind k. +// When k is an enum, a list of enum value descriptors must be provided. +func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + switch s { + case "1": + return pref.ValueOfBool(true), nil, nil + case "0": + return pref.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": + return pref.ValueOfBool(true), nil, nil + case "false": + return pref.ValueOfBool(false), nil, nil + } + } + case pref.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { + if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. + ev := evs.ByName(pref.Name(s)) + if ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { + return pref.ValueOfInt32(int32(v)), nil, nil + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + return pref.ValueOfInt64(int64(v)), nil, nil + } + case pref.Uint32Kind, pref.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { + return pref.ValueOfUint32(uint32(v)), nil, nil + } + case pref.Uint64Kind, pref.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + return pref.ValueOfUint64(uint64(v)), nil, nil + } + case pref.FloatKind, pref.DoubleKind: + var v float64 + var err error + switch s { + case "-inf": + v = math.Inf(-1) + case "inf": + v = math.Inf(+1) + case "nan": + v = math.NaN() + default: + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { + if k == pref.FloatKind { + return pref.ValueOfFloat32(float32(v)), nil, nil + } else { + return pref.ValueOfFloat64(float64(v)), nil, nil + } + } + case pref.StringKind: + // String values are already unescaped and can be used as is. + return pref.ValueOfString(s), nil, nil + case pref.BytesKind: + if b, ok := unmarshalBytes(s); ok { + return pref.ValueOfBytes(b), nil, nil + } + } + return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) +} + +// Marshal serializes v as the default string according to the given kind k. +// When specifying the Descriptor format for an enum kind, the associated +// enum value descriptor must be provided. 
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil + } else { + return "0", nil + } + } else { + if v.Bool() { + return "true", nil + } else { + return "false", nil + } + } + case pref.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil + case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil + case pref.FloatKind, pref.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): + return "-inf", nil + case math.IsInf(f, +1): + return "inf", nil + case math.IsNaN(f): + return "nan", nil + default: + if k == pref.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } + case pref.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil + case pref.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } + } + return "", errors.New("could not format value for %v: %v", k, v) +} + +// unmarshalBytes deserializes bytes by applying C unescaping. +func unmarshalBytes(s string) ([]byte, bool) { + // Bytes values use the same escaping as the text format, + // however they lack the surrounding double quotes. + v, err := ptext.UnmarshalString(`"` + s + `"`) + if err != nil { + return nil, false + } + return []byte(v), true +} + +// marshalBytes serializes bytes by using C escaping. +// To match the exact output of protoc, this is identical to the +// CEscape function in strutil.cc of the protoc source code. +func marshalBytes(b []byte) (string, bool) { + var s []byte + for _, c := range b { + switch c { + case '\n': + s = append(s, `\n`...) + case '\r': + s = append(s, `\r`...) + case '\t': + s = append(s, `\t`...) + case '"': + s = append(s, `\"`...) + case '\'': + s = append(s, `\'`...) + case '\\': + s = append(s, `\\`...) + default: + if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { + s = append(s, c) + } else { + s = append(s, fmt.Sprintf(`\%03o`, c)...) + } + } + } + return string(s), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go new file mode 100644 index 00000000..b1eeea50 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -0,0 +1,258 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package messageset encodes and decodes the obsolete MessageSet wire format. +package messageset + +import ( + "math" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// The MessageSet wire format is equivalent to a message defiend as follows, +// where each Item defines an extension field with a field number of 'type_id' +// and content of 'message'. MessageSet extensions must be non-repeated message +// fields. 
+// +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// } +// } +const ( + FieldItem = protowire.Number(1) + FieldTypeID = protowire.Number(2) + FieldMessage = protowire.Number(3) +) + +// ExtensionName is the field name for extensions of MessageSet. +// +// A valid MessageSet extension must be of the form: +// message MyMessage { +// extend proto2.bridge.MessageSet { +// optional MyMessage message_set_extension = 1234; +// } +// ... +// } +const ExtensionName = "message_set_extension" + +// IsMessageSet returns whether the message uses the MessageSet wire format. +func IsMessageSet(md pref.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() +} + +// IsMessageSetExtension reports this field extends a MessageSet. +func IsMessageSetExtension(fd pref.FieldDescriptor) bool { + if fd.Name() != ExtensionName { + return false + } + if fd.FullName().Parent() != fd.Message().FullName() { + return false + } + return IsMessageSet(fd.ContainingMessage()) +} + +// FindMessageSetExtension locates a MessageSet extension field by name. +// In text and JSON formats, the extension name used is the message itself. +// The extension field name is derived by appending ExtensionName. +func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { + name := s.Append(ExtensionName) + xt, err := r.FindExtensionByName(name) + if err != nil { + if err == preg.NotFound { + return nil, err + } + return nil, errors.Wrap(err, "%q", name) + } + if !IsMessageSetExtension(xt.TypeDescriptor()) { + return nil, preg.NotFound + } + return xt, nil +} + +// SizeField returns the size of a MessageSet item field containing an extension +// with the given field number, not counting the contents of the message subfield. +func SizeField(num protowire.Number) int { + return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) +} + +// Unmarshal parses a MessageSet. +// +// It calls fn with the type ID and value of each item in the MessageSet. +// Unknown fields are discarded. +// +// If wantLen is true, the item values include the varint length prefix. +// This is ugly, but simplifies the fast-path decoder in internal/impl. +func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + if num != FieldItem || wtyp != protowire.StartGroupType { + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + continue + } + typeID, value, n, err := ConsumeFieldValue(b, wantLen) + if err != nil { + return err + } + b = b[n:] + if typeID == 0 { + continue + } + if err := fn(typeID, value); err != nil { + return err + } + } + return nil +} + +// ConsumeFieldValue parses b as a MessageSet item field value until and including +// the trailing end group marker. It assumes the start group tag has already been parsed. +// It returns the contents of the type_id and message subfields and the total +// item length. +// +// If wantLen is true, the returned message value includes the length prefix. 
+func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { + ilen := len(b) + for { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + switch { + case num == FieldItem && wtyp == protowire.EndGroupType: + if wantLen && len(message) == 0 { + // The message field was missing, which should never happen. + // Be prepared for this case anyway. + message = protowire.AppendVarint(message, 0) + } + return typeid, message, ilen - len(b), nil + case num == FieldTypeID && wtyp == protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + if v < 1 || v > math.MaxInt32 { + return 0, nil, 0, errors.New("invalid type_id in message set") + } + typeid = protowire.Number(v) + case num == FieldMessage && wtyp == protowire.BytesType: + m, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + if message == nil { + if wantLen { + message = b[:n:n] + } else { + message = m[:len(m):len(m)] + } + } else { + // This case should never happen in practice, but handle it for + // correctness: The MessageSet item contains multiple message + // fields, which need to be merged. + // + // In the case where we're returning the length, this becomes + // quite inefficient since we need to strip the length off + // the existing data and reconstruct it with the combined length. + if wantLen { + _, nn := protowire.ConsumeVarint(message) + m0 := message[nn:] + message = nil + message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) + message = append(message, m0...) + message = append(message, m...) + } else { + message = append(message, m...) + } + } + b = b[n:] + default: + // We have no place to put it, so we just ignore unknown fields. + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + } + } +} + +// AppendFieldStart appends the start of a MessageSet item field containing +// an extension with the given number. The caller must add the message +// subfield (including the tag). +func AppendFieldStart(b []byte, num protowire.Number) []byte { + b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) + b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) + b = protowire.AppendVarint(b, uint64(num)) + return b +} + +// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. +func AppendFieldEnd(b []byte) []byte { + return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) +} + +// SizeUnknown returns the size of an unknown fields section in MessageSet format. +// +// See AppendUnknown. +func SizeUnknown(unknown []byte) (size int) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return 0 + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return 0 + } + unknown = unknown[n:] + size += SizeField(num) + protowire.SizeTag(FieldMessage) + n + } + return size +} + +// AppendUnknown appends unknown fields to b in MessageSet format. +// +// For historic reasons, unresolved items in a MessageSet are stored in a +// message's unknown fields section in non-MessageSet format. That is, an +// unknown item with typeID T and value V appears in the unknown fields as +// a field with number T and value V. +// +// This function converts the unknown fields back into MessageSet form. 
+func AppendUnknown(b, unknown []byte) ([]byte, error) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return nil, errors.New("invalid data in message set unknown fields") + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return nil, errors.New("invalid data in message set unknown fields") + } + b = AppendFieldStart(b, num) + b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) + b = append(b, unknown[:n]...) + b = AppendFieldEnd(b) + unknown = unknown[n:] + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go new file mode 100644 index 00000000..16c02d7b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -0,0 +1,207 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag marshals and unmarshals the legacy struct tags as generated +// by historical versions of protoc-gen-go. +package tag + +import ( + "reflect" + "strconv" + "strings" + + defval "google.golang.org/protobuf/internal/encoding/defval" + fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var byteType = reflect.TypeOf(byte(0)) + +// Unmarshal decodes the tag into a prototype.Field. +// +// The goType is needed to determine the original protoreflect.Kind since the +// tag does not record sufficient information to determine that. +// The type is the underlying field type (e.g., a repeated field may be +// represented by []T, but the Go type passed in is just T). +// A list of enum value descriptors must be provided for enum fields. +// This does not populate the Enum or Message (except for weak message). +// +// This function is a best effort attempt; parsing errors are ignored. 
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { + f := new(fdesc.Field) + f.L0.ParentFile = fdesc.SurrogateProto2 + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + f.L0.FullName = pref.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + f.L1.Number = pref.FieldNumber(n) + case s == "opt": + f.L1.Cardinality = pref.Optional + case s == "req": + f.L1.Cardinality = pref.Required + case s == "rep": + f.L1.Cardinality = pref.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: + f.L1.Kind = pref.BoolKind + case reflect.Int32: + f.L1.Kind = pref.Int32Kind + case reflect.Int64: + f.L1.Kind = pref.Int64Kind + case reflect.Uint32: + f.L1.Kind = pref.Uint32Kind + case reflect.Uint64: + f.L1.Kind = pref.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { + f.L1.Kind = pref.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { + f.L1.Kind = pref.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: + f.L1.Kind = pref.Sfixed32Kind + case reflect.Uint32: + f.L1.Kind = pref.Fixed32Kind + case reflect.Float32: + f.L1.Kind = pref.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: + f.L1.Kind = pref.Sfixed64Kind + case reflect.Uint64: + f.L1.Kind = pref.Fixed64Kind + case reflect.Float64: + f.L1.Kind = pref.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: + f.L1.Kind = pref.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: + f.L1.Kind = pref.BytesKind + default: + f.L1.Kind = pref.MessageKind + } + case s == "group": + f.L1.Kind = pref.GroupKind + case strings.HasPrefix(s, "enum="): + f.L1.Kind = pref.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { + f.L1.JSONName.Init(jsonName) + } + case s == "packed": + f.L1.HasPacked = true + f.L1.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true + f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) + f.L1.Default = fdesc.DefaultValue(v, ev) + case s == "proto3": + f.L0.ParentFile = fdesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. + if f.L1.Kind == pref.GroupKind { + f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f +} + +// Marshal encodes the protoreflect.FieldDescriptor as a tag. +// +// The enumName must be provided if the kind is an enum. +// Historically, the formulation of the enum "name" was the proto package +// dot-concatenated with the generated Go identifier for the enum type. +// Depending on the context on how Marshal is called, there are different ways +// through which that information is determined. As such it is the caller's +// responsibility to provide a function to obtain that information. 
+func Marshal(fd pref.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { + case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + tag = append(tag, "varint") + case pref.Sint32Kind: + tag = append(tag, "zigzag32") + case pref.Sint64Kind: + tag = append(tag, "zigzag64") + case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + tag = append(tag, "fixed32") + case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + tag = append(tag, "fixed64") + case pref.StringKind, pref.BytesKind, pref.MessageKind: + tag = append(tag, "bytes") + case pref.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { + case pref.Optional: + tag = append(tag, "opt") + case pref.Required: + tag = append(tag, "req") + case pref.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) + if fd.Kind() == pref.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. + name = string(fd.Message().Name()) + } + tag = append(tag, "name="+name) + if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { + // NOTE: The jsonName != name condition is suspect, but it preserve + // the exact same semantics from the previous generator. + tag = append(tag, "json="+jsonName) + } + if fd.IsWeak() { + tag = append(tag, "weak="+string(fd.Message().FullName())) + } + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. + if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } + if fd.Kind() == pref.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { + tag = append(tag, "oneof") + } + // This must appear last in the tag, since commas in strings aren't escaped. + if fd.HasDefault() { + def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) + tag = append(tag, "def="+def) + } + return strings.Join(tag, ",") +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go new file mode 100644 index 00000000..eb10ea10 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// Decoder is a token-based textproto decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing the byte characters for MessageOpen and + // ListOpen kinds. The top of stack represents the message or the list that + // the current token is nested in. An empty stack means the current token is + // at the top level message. The characters '{' and '<' both represent the + // MessageOpen kind. 
+ openStack []byte + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +// Peek looks ahead and returns the next token and error without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext(d.lastToken.kind) + if err != nil { + return Token{}, err + } + + switch tok.kind { + case comma, semicolon: + tok, err = d.parseNext(tok.kind) + if err != nil { + return Token{}, err + } + } + d.lastToken = tok + return tok, nil +} + +const ( + mismatchedFmt = "mismatched close character %q" + unexpectedFmt = "unexpected character %q" +) + +// parseNext parses the next Token based on given last kind. +func (d *Decoder) parseNext(lastKind Kind) (Token, error) { + // Trim leading spaces. + d.consume(0) + isEOF := false + if len(d.in) == 0 { + isEOF = true + } + + switch lastKind { + case EOF: + return d.consumeToken(EOF, 0, 0), nil + + case bof: + // Start of top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case Name: + // Next token can be MessageOpen, ListOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + case '[': + d.pushOpenStack(ch) + return d.consumeToken(ListOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case Scalar: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch d.in[0] { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + _, closeCh := d.currentOpenKind() + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case MessageClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case ListOpen: + // Next token can be ListClose, MessageStart or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case ListClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + default: + // It is not possible to have this case. Let it panic below. + } + + case comma, semicolon: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case ListOpen: + if lastKind == semicolon { + // It is not be possible to have this case as logic here + // should not have produced a semicolon Token when inside a + // list. Let it panic below. + break + } + // Next token can be MessageOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + } + } + + line, column := d.Position(len(d.orig) - len(d.in)) + panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) +} + +var otherCloseChar = map[byte]byte{ + '}': '>', + '>': '}', +} + +// currentOpenKind indicates whether current position is inside a message, list +// or top-level message by returning MessageOpen, ListOpen or bof respectively. +// If the returned kind is either a MessageOpen or ListOpen, it also returns the +// corresponding closing character. +func (d *Decoder) currentOpenKind() (Kind, byte) { + if len(d.openStack) == 0 { + return bof, 0 + } + openCh := d.openStack[len(d.openStack)-1] + switch openCh { + case '{': + return MessageOpen, '}' + case '<': + return MessageOpen, '>' + case '[': + return ListOpen, ']' + } + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) +} + +func (d *Decoder) pushOpenStack(ch byte) { + d.openStack = append(d.openStack, ch) +} + +func (d *Decoder) popOpenStack() { + d.openStack = d.openStack[:len(d.openStack)-1] +} + +// parseFieldName parses field name and separator. +func (d *Decoder) parseFieldName() (tok Token, err error) { + defer func() { + if err == nil && d.tryConsumeChar(':') { + tok.attrs |= hasSeparator + } + }() + + // Extension or Any type URL. + if d.in[0] == '[' { + return d.parseTypeName() + } + + // Identifier. + if size := parseIdent(d.in, false); size > 0 { + return d.consumeToken(Name, size, uint8(IdentName)), nil + } + + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { + if !num.neg && num.kind == numDec { + if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } + return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + } + + return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) +} + +// parseTypeName parses Any type URL or extension field name. The name is +// enclosed in [ and ] characters. The C++ parser does not handle many legal URL +// strings. This implementation is more liberal and allows for the pattern +// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed +// in between [ ], '.', '/' and the sub names. +func (d *Decoder) parseTypeName() (Token, error) { + startPos := len(d.orig) - len(d.in) + // Use alias s to advance first in order to use d.in for error handling. + // Caller already checks for [ as first character. 
+ s := consume(d.in[1:], 0) + if len(s) == 0 { + return Token{}, ErrUnexpectedEOF + } + + var name []byte + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + var closed bool + for len(s) > 0 && !closed { + switch { + case s[0] == ']': + s = s[1:] + closed = true + + case s[0] == '/', s[0] == '.': + if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)+1]) + } + name = append(name, s[0]) + s = s[1:] + s = consume(s, 0) + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + default: + return Token{}, d.newSyntaxError( + "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + } + } + + if !closed { + return Token{}, ErrUnexpectedEOF + } + + // First character cannot be '.'. Last character cannot be '.' or '/'. + size := len(name) + if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)]) + } + + d.in = s + endPos := len(d.orig) - len(d.in) + d.consume(0) + + return Token{ + kind: Name, + attrs: uint8(TypeName), + pos: startPos, + raw: d.orig[startPos:endPos], + str: string(name), + }, nil +} + +func isTypeNameChar(b byte) bool { + return (b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z')) +} + +func isWhiteSpace(b byte) bool { + switch b { + case ' ', '\n', '\r', '\t': + return true + default: + return false + } +} + +// parseIdent parses an unquoted proto identifier and returns size. +// If allowNeg is true, it allows '-' to be the first character in the +// identifier. This is used when parsing literal values like -infinity, etc. +// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` +func parseIdent(input []byte, allowNeg bool) int { + var size int + + s := input + if len(s) == 0 { + return 0 + } + + if allowNeg && s[0] == '-' { + s = s[1:] + size++ + if len(s) == 0 { + return 0 + } + } + + switch { + case s[0] == '_', + 'a' <= s[0] && s[0] <= 'z', + 'A' <= s[0] && s[0] <= 'Z': + s = s[1:] + size++ + default: + return 0 + } + + for len(s) > 0 && (s[0] == '_' || + 'a' <= s[0] && s[0] <= 'z' || + 'A' <= s[0] && s[0] <= 'Z' || + '0' <= s[0] && s[0] <= '9') { + s = s[1:] + size++ + } + + if len(s) > 0 && !isDelim(s[0]) { + return 0 + } + + return size +} + +// parseScalar parses for a string, literal or number value. +func (d *Decoder) parseScalar() (Token, error) { + if d.in[0] == '"' || d.in[0] == '\'' { + return d.parseStringValue() + } + + if tok, ok := d.parseLiteralValue(); ok { + return tok, nil + } + + if tok, ok := d.parseNumberValue(); ok { + return tok, nil + } + + return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) +} + +// parseLiteralValue parses a literal value. A literal value is used for +// bools, special floats and enums. This function simply identifies that the +// field value is a literal. +func (d *Decoder) parseLiteralValue() (Token, bool) { + size := parseIdent(d.in, true) + if size == 0 { + return Token{}, false + } + return d.consumeToken(Scalar, size, literalValue), true +} + +// consumeToken constructs a Token for given Kind from d.in and consumes given +// size-length from it. 
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { + // Important to compute raw and pos before consuming. + tok := Token{ + kind: kind, + attrs: attrs, + pos: len(d.orig) - len(d.in), + raw: d.in[:size], + } + d.consume(size) + return tok +} + +// newSyntaxError returns a syntax error with line and column information for +// current position. +func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(len(d.orig) - len(d.in)) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +func (d *Decoder) tryConsumeChar(c byte) bool { + if len(d.in) > 0 && d.in[0] == c { + d.consume(1) + return true + } + return false +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func (d *Decoder) consume(n int) { + d.in = consume(d.in, n) + return +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func consume(b []byte, n int) []byte { + b = b[n:] + for len(b) > 0 { + switch b[0] { + case ' ', '\n', '\r', '\t': + b = b[1:] + case '#': + if i := bytes.IndexByte(b, '\n'); i >= 0 { + b = b[i+len("\n"):] + } else { + b = nil + } + default: + return b + } + } + return b +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) + +// isDelim returns true if given byte is a delimiter character. +func isDelim(c byte) bool { + return !(c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go new file mode 100644 index 00000000..f2d90b78 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +// parseNumberValue parses a number from the input and returns a Token object. +func (d *Decoder) parseNumberValue() (Token, bool) { + in := d.in + num := parseNumber(in) + if num.size == 0 { + return Token{}, false + } + numAttrs := num.kind + if num.neg { + numAttrs |= isNegative + } + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { + strSize = last + } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], + str: string(d.in[:strSize]), + numAttrs: numAttrs, + } + d.consume(num.size) + return tok, true +} + +const ( + numDec uint8 = (1 << iota) / 2 + numHex + numOct + numFloat +) + +// number is the result of parsing out a valid number from parseNumber. It +// contains data for doing float or integer conversion via the strconv package +// in conjunction with the input bytes. 
+type number struct { + kind uint8 + neg bool + size int +} + +// parseNumber constructs a number object from given input. It allows for the +// following patterns: +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// It also returns the number of parsed bytes for the given number, 0 if it is +// not a number. +func parseNumber(input []byte) number { + kind := numDec + var size int + var neg bool + + s := input + if len(s) == 0 { + return number{} + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + size++ + if len(s) == 0 { + return number{} + } + } + + // C++ allows for whitespace and comments in between the negative sign and + // the rest of the number. This logic currently does not but is consistent + // with v1. + + switch { + case s[0] == '0': + if len(s) > 1 { + switch { + case s[1] == 'x' || s[1] == 'X': + // Parse as hex number. + kind = numHex + n := 2 + s = s[2:] + for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || + ('a' <= s[0] && s[0] <= 'f') || + ('A' <= s[0] && s[0] <= 'F')) { + s = s[1:] + n++ + } + if n == 2 { + return number{} + } + size += n + + case '0' <= s[1] && s[1] <= '7': + // Parse as octal number. + kind = numOct + n := 2 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { + s = s[1:] + n++ + } + size += n + } + + if kind&(numHex|numOct) > 0 { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + return number{kind: kind, neg: neg, size: size} + } + } + s = s[1:] + size++ + + case '1' <= s[0] && s[0] <= '9': + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + + case s[0] == '.': + // Set kind to numFloat to signify the intent to parse as float. And + // that it needs to have other digits after '.'. + kind = numFloat + + default: + return number{} + } + + // . followed by 0 or more digits. + if len(s) > 0 && s[0] == '.' { + n := 1 + s = s[1:] + // If decimal point was before any digits, it should be followed by + // other digits. + if len(s) == 0 && kind == numFloat { + return number{} + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + kind = numFloat + } + + // e or E followed by an optional - or + and 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + kind = numFloat + s = s[1:] + n := 1 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return number{} + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + } + + // Optional suffix f or F for floats. + if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { + kind = numFloat + s = s[1:] + size++ + } + + // Check that next byte is a delimiter or it is at the end. + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + + return number{kind: kind, neg: neg, size: size} +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go new file mode 100644 index 00000000..d4d34902 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go @@ -0,0 +1,161 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package text + +import ( + "bytes" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +// parseStringValue parses string field token. +// This differs from parseString since the text format allows +// multiple back-to-back string literals where they are semantically treated +// as a single large string with all values concatenated. +// +// E.g., `"foo" "bar" "baz"` => "foobarbaz" +func (d *Decoder) parseStringValue() (Token, error) { + // Note that the ending quote is sufficient to unambiguously mark the end + // of a string. Thus, the text grammar does not require intervening + // whitespace or control characters in-between strings. + // Thus, the following is valid: + // `"foo"'bar'"baz"` => "foobarbaz" + in0 := d.in + var ss []string + for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { + s, err := d.parseString() + if err != nil { + return Token{}, err + } + ss = append(ss, s) + } + // d.in already points to the end of the value at this point. + return Token{ + kind: Scalar, + attrs: stringValue, + pos: len(d.orig) - len(in0), + raw: in0[:len(in0)-len(d.in)], + str: strings.Join(ss, ""), + }, nil +} + +// parseString parses a string value enclosed in " or '. +func (d *Decoder) parseString() (string, error) { + in := d.in + if len(in) == 0 { + return "", ErrUnexpectedEOF + } + quote := in[0] + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", d.newSyntaxError("invalid UTF-8 detected") + case r == 0 || r == '\n': + return "", d.newSyntaxError("invalid character %q in string", r) + case r == rune(quote): + in = in[1:] + d.consume(len(d.in) - len(in)) + return string(out), nil + case r == '\\': + if len(in) < 2 { + return "", ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\'', '\\', '?': + in, out = in[2:], append(out, r) + case 'a': + in, out = in[2:], append(out, '\a') + case 'b': + in, out = in[2:], append(out, '\b') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'v': + in, out = in[2:], append(out, '\v') + case 'f': + in, out = in[2:], append(out, '\f') + case '0', '1', '2', '3', '4', '5', '6', '7': + // One, two, or three octal characters. + n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) + if err != nil { + return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) + } + in, out = in[1+n:], append(out, byte(v)) + case 'x': + // One or two hexadecimal characters. 
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
+ if n > 2 {
+ n = 2
+ }
+ v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
+ }
+ in, out = in[2+n:], append(out, byte(v))
+ case 'u', 'U':
+ // Four or eight hexadecimal characters
+ n := 6
+ if r == 'U' {
+ n = 10
+ }
+ if len(in) < n {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
+ if utf8.MaxRune < v || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
+ }
+ in = in[n:]
+
+ r := rune(v)
+ if utf16.IsSurrogate(r) {
+ if len(in) < 6 {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ r = utf16.DecodeRune(r, rune(v))
+ if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
+ }
+ in = in[6:]
+ }
+ out = append(out, string(r)...)
+ default:
+ return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
+ }
+ default:
+ i := indexNeedEscapeInBytes(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ return "", ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
+
+// UnmarshalString returns an unescaped string given a textproto string value.
+// String value needs to contain single or double quotes. This is only used by
+// internal/encoding/defval package for unmarshaling bytes.
+func UnmarshalString(s string) (string, error) {
+ d := NewDecoder([]byte(s))
+ return d.parseString()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
new file mode 100644
index 00000000..83d2b0d5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
@@ -0,0 +1,373 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/flags"
+)
+
+// Kind represents a token kind expressible in the textproto format.
+type Kind uint8
+
+// Kind values.
+const (
+ Invalid Kind = iota
+ EOF
+ Name // Name indicates the field name.
+ Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
+ MessageOpen
+ MessageClose
+ ListOpen
+ ListClose
+
+ // comma and semi-colon are only for parsing in between values and should not be exposed.
+ comma
+ semicolon
+
+ // bof indicates beginning of file, which is the default token
+ // kind at the beginning of parsing.
+ bof = Invalid
+)
+
+func (t Kind) String() string {
+ switch t {
+ case Invalid:
+ return "<invalid>"
+ case EOF:
+ return "eof"
+ case Scalar:
+ return "scalar"
+ case Name:
+ return "name"
+ case MessageOpen:
+ return "{"
+ case MessageClose:
+ return "}"
+ case ListOpen:
+ return "["
+ case ListClose:
+ return "]"
+ case comma:
+ return ","
+ case semicolon:
+ return ";"
+ default:
+ return fmt.Sprintf("<invalid:%v>", uint8(t))
+ }
+}
+
+// NameKind represents different types of field names.
+type NameKind uint8
+
+// NameKind values.
+const (
+ IdentName NameKind = iota + 1
+ TypeName
+ FieldNumber
+)
+
+func (t NameKind) String() string {
+ switch t {
+ case IdentName:
+ return "IdentName"
+ case TypeName:
+ return "TypeName"
+ case FieldNumber:
+ return "FieldNumber"
+ default:
+ return fmt.Sprintf("<invalid:%v>", uint8(t))
+ }
+}
+
+// Bit mask in Token.attrs to indicate if a Name token is followed by the
+// separator char ':'. The field name separator char is optional for message
+// field or repeated message field, but required for all other types. Decoder
+// simply indicates whether a Name token is followed by separator or not. It is
+// up to the prototext package to validate.
+const hasSeparator = 1 << 7
+
+// Scalar value types.
+const (
+ numberValue = iota + 1
+ stringValue
+ literalValue
+)
+
+// Bit mask in Token.numAttrs to indicate that the number is a negative.
+const isNegative = 1 << 7
+
+// Token provides a parsed token kind and value. Values are provided by the
+// different accessor methods.
+type Token struct {
+ // Kind of the Token object.
+ kind Kind
+ // attrs contains metadata for the following Kinds:
+ // Name: hasSeparator bit and one of NameKind.
+ // Scalar: one of numberValue, stringValue, literalValue.
+ attrs uint8
+ // numAttrs contains metadata for numberValue:
+ // - highest bit is whether negative or positive.
+ // - lower bits indicate one of numDec, numHex, numOct, numFloat.
+ numAttrs uint8
+ // pos provides the position of the token in the original input.
+ pos int
+ // raw bytes of the serialized token.
+ // This is a subslice into the original input.
+ raw []byte
+ // str contains parsed string for the following:
+ // - stringValue of Scalar kind
+ // - numberValue of Scalar kind
+ // - TypeName of Name kind
+ str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+ return t.kind
+}
+
+// RawString returns the read value in string.
+func (t Token) RawString() string {
+ return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+ return t.pos
+}
+
+// NameKind returns IdentName, TypeName or FieldNumber.
+// It panics if type is not Name.
+func (t Token) NameKind() NameKind {
+ if t.kind == Name {
+ return NameKind(t.attrs &^ hasSeparator)
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// HasSeparator returns true if the field name is followed by the separator char
+// ':', else false. It panics if type is not Name.
+func (t Token) HasSeparator() bool {
+ if t.kind == Name {
+ return t.attrs&hasSeparator != 0
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// IdentName returns the value for IdentName type.
+func (t Token) IdentName() string {
+ if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
+ return string(t.raw)
+ }
+ panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// TypeName returns the value for TypeName type.
+func (t Token) TypeName() string {
+ if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// FieldNumber returns the value for FieldNumber type. It returns a
+// non-negative int32 value. Caller will still need to validate for the correct
+// field number range.
+func (t Token) FieldNumber() int32 { + if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { + panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) + } + // Following should not return an error as it had already been called right + // before this Token was constructed. + num, _ := strconv.ParseInt(string(t.raw), 10, 32) + return int32(num) +} + +// String returns the string value for a Scalar type. +func (t Token) String() (string, bool) { + if t.kind != Scalar || t.attrs != stringValue { + return "", false + } + return t.str, true +} + +// Enum returns the literal value for a Scalar type for use as enum literals. +func (t Token) Enum() (string, bool) { + if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { + return "", false + } + return string(t.raw), true +} + +// Bool returns the bool value for a Scalar type. +func (t Token) Bool() (bool, bool) { + if t.kind != Scalar { + return false, false + } + switch t.attrs { + case literalValue: + if b, ok := boolLits[string(t.raw)]; ok { + return b, true + } + case numberValue: + // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, + // 0x1, etc. + n, err := strconv.ParseUint(t.str, 0, 64) + if err == nil { + switch n { + case 0: + return false, true + case 1: + return true, true + } + } + } + return false, false +} + +// These exact boolean literals are the ones supported in C++. +var boolLits = map[string]bool{ + "t": true, + "true": true, + "True": true, + "f": false, + "false": false, + "False": false, +} + +// Uint64 returns the uint64 value for a Scalar type. +func (t Token) Uint64() (uint64, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 64) + if err != nil { + return 0, false + } + return n, true +} + +// Uint32 returns the uint32 value for a Scalar type. +func (t Token) Uint32() (uint32, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 32) + if err != nil { + return 0, false + } + return uint32(n), true +} + +// Int64 returns the int64 value for a Scalar type. +func (t Token) Int64() (int64, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { + return n, true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { + return int64(n), true + } + } + return 0, false +} + +// Int32 returns the int32 value for a Scalar type. +func (t Token) Int32() (int32, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { + return int32(n), true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { + return int32(n), true + } + } + return 0, false +} + +// Float64 returns the float64 value for a Scalar type. 
+func (t Token) Float64() (float64, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return f, true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + return n, true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return n, true + } + } + return 0, false +} + +// Float32 returns the float32 value for a Scalar type. +func (t Token) Float32() (float32, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return float32(f), true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + // Overflows are treated as (-)infinity. + return float32(n), true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return float32(n), true + } + } + return 0, false +} + +// These are the supported float literals which C++ permits case-insensitive +// variants of these. +var floatLits = map[string]float64{ + "nan": math.NaN(), + "inf": math.Inf(1), + "infinity": math.Inf(1), + "-inf": math.Inf(-1), + "-infinity": math.Inf(-1), +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.attrs == y.attrs && + x.numAttrs == y.numAttrs && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go new file mode 100644 index 00000000..0ce8d6fb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package text implements the text format for protocol buffers. +// This package has no semantic understanding for protocol buffers and is only +// a parser and composer for the format. +// +// There is no formal specification for the protobuf text format, as such the +// C++ implementation (see google::protobuf::TextFormat) is the reference +// implementation of the text format. +// +// This package is neither a superset nor a subset of the C++ implementation. +// This implementation permits a more liberal grammar in some cases to be +// backwards compatible with the historical Go implementation. +// Future parsings unique to Go should not be added. +// Some grammars allowed by the C++ implementation are deliberately +// not implemented here because they are considered a bug by the protobuf team +// and should not be replicated. +// +// The Go implementation should implement a sufficient amount of the C++ +// grammar such that the default text serialization by C++ can be parsed by Go. +// However, just because the C++ parser accepts some input does not mean that +// the Go implementation should as well. 
+// +// The text format is almost a superset of JSON except: +// * message keys are not quoted strings, but identifiers +// * the top-level value must be a message without the delimiters +package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go new file mode 100644 index 00000000..c4ba1c59 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -0,0 +1,267 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// encType represents an encoding type. +type encType uint8 + +const ( + _ encType = (1 << iota) / 2 + name + scalar + messageOpen + messageClose +) + +// Encoder provides methods to write out textproto constructs and values. The user is +// responsible for producing valid sequences of constructs and values. +type Encoder struct { + encoderState + + indent string + newline string // set to "\n" if len(indent) > 0 + delims [2]byte + outputASCII bool +} + +type encoderState struct { + lastType encType + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry in a List or Message +// to be preceded by the indent and trailed by a newline. +// +// If delims is not the zero value, it controls the delimiter characters used +// for messages (e.g., "{}" vs "<>"). +// +// If outputASCII is true, strings will be serialized in such a way that +// multi-byte UTF-8 sequences are escaped. This property ensures that the +// overall output is ASCII (as opposed to UTF-8). +func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space and tab characters") + } + e.indent = indent + e.newline = "\n" + } + switch delims { + case [2]byte{0, 0}: + e.delims = [2]byte{'{', '}'} + case [2]byte{'{', '}'}, [2]byte{'<', '>'}: + e.delims = delims + default: + return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") + } + e.outputASCII = outputASCII + + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// StartMessage writes out the '{' or '<' symbol. +func (e *Encoder) StartMessage() { + e.prepareNext(messageOpen) + e.out = append(e.out, e.delims[0]) +} + +// EndMessage writes out the '}' or '>' symbol. +func (e *Encoder) EndMessage() { + e.prepareNext(messageClose) + e.out = append(e.out, e.delims[1]) +} + +// WriteName writes out the field name and the separator ':'. +func (e *Encoder) WriteName(s string) { + e.prepareNext(name) + e.out = append(e.out, s...) + e.out = append(e.out, ':') +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + if b { + e.WriteLiteral("true") + } else { + e.WriteLiteral("false") + } +} + +// WriteString writes out the given string value. 
+func (e *Encoder) WriteString(s string) { + e.prepareNext(scalar) + e.out = appendString(e.out, s, e.outputASCII) +} + +func appendString(out []byte, in string, outputASCII bool) []byte { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + // We do not report invalid UTF-8 because strings in the text format + // are used to represent both the proto string and bytes type. + r = rune(in[0]) + fallthrough + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'x') + out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + case outputASCII && r >= utf8.RuneSelf: + out = append(out, '\\') + if r <= math.MaxUint16 { + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } else { + out = append(out, 'U') + out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i := 0; i < len(s); i++ { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float value for given bitSize. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, "nan"...) + case math.IsInf(n, +1): + return append(out, "inf"...) + case math.IsInf(n, -1): + return append(out, "-inf"...) + default: + return strconv.AppendFloat(out, n, 'g', -1, bitSize) + } +} + +// WriteInt writes out the given signed integer value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// WriteLiteral writes out the given string as a literal value without quotes. +// This is used for writing enum literal strings. +func (e *Encoder) WriteLiteral(s string) { + e.prepareNext(scalar) + e.out = append(e.out, s...) +} + +// prepareNext adds possible space and indentation for the next value based +// on last encType and indent option. It also updates e.lastType to next. +func (e *Encoder) prepareNext(next encType) { + defer func() { + e.lastType = next + }() + + // Single line. + if len(e.indent) == 0 { + // Add space after each field before the next one. + if e.lastType&(scalar|messageClose) != 0 && next == name { + e.out = append(e.out, ' ') + // Add a random extra space to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + // Multi-line. 
+ switch { + case e.lastType == name: + e.out = append(e.out, ' ') + // Add a random extra space after name: to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + + case e.lastType == messageOpen && next != messageClose: + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + + case e.lastType&(scalar|messageClose) != 0: + if next == messageClose { + e.indents = e.indents[:len(e.indents)-len(e.indent)] + } + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } +} + +// Snapshot returns the current snapshot for use in Reset. +func (e *Encoder) Snapshot() encoderState { + return e.encoderState +} + +// Reset resets the Encoder to the given encoderState from a Snapshot. +func (e *Encoder) Reset(es encoderState) { + e.encoderState = es +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go new file mode 100644 index 00000000..20c17b35 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/internal/detrand" +) + +// Error is a sentinel matching all errors produced by this package. +var Error = errors.New("protobuf error") + +// New formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func New(f string, x ...interface{}) error { + return &prefixError{s: format(f, x...)} +} + +type prefixError struct{ s string } + +var prefix = func() string { + // Deliberately introduce instability into the error message string to + // discourage users from performing error string comparisons. + if detrand.Bool() { + return "proto: " // use non-breaking spaces (U+00a0) + } else { + return "proto: " // use regular spaces (U+0020) + } +}() + +func (e *prefixError) Error() string { + return prefix + e.s +} + +func (e *prefixError) Unwrap() error { + return Error +} + +// Wrap returns an error that has a "proto" prefix, the formatted string described +// by the format specifier and arguments, and a suffix of err. The error wraps err. +func Wrap(err error, f string, x ...interface{}) error { + return &wrapError{ + s: format(f, x...), + err: err, + } +} + +type wrapError struct { + s string + err error +} + +func (e *wrapError) Error() string { + return format("%v%v: %v", prefix, e.s, e.err) +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +func (e *wrapError) Is(target error) bool { + return target == Error +} + +func format(f string, x ...interface{}) string { + // avoid "proto: " prefix when chaining + for i := 0; i < len(x); i++ { + switch e := x[i].(type) { + case *prefixError: + x[i] = e.s + case *wrapError: + x[i] = format("%v: %v", e.s, e.err) + } + } + return fmt.Sprintf(f, x...) 
+} + +func InvalidUTF8(name string) error { + return New("field %v contains invalid UTF-8", name) +} + +func RequiredNotSet(name string) error { + return New("required field %v not set", name) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go new file mode 100644 index 00000000..f90e909b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -0,0 +1,39 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package errors + +import "reflect" + +// Is is a copy of Go 1.13's errors.Is for use with older Go versions. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + if err = unwrap(err); err == nil { + return false + } + } +} + +func unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go new file mode 100644 index 00000000..dc05f419 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package errors + +import "errors" + +// Is is errors.Is. +func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go new file mode 100644 index 00000000..517c4e2a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fieldsort defines an ordering of fields. +// +// The ordering defined by this package matches the historic behavior of the proto +// package, placing extensions first and oneofs last. +// +// There is no guarantee about stability of the wire encoding, and users should not +// depend on the order defined in this package as it is subject to change without +// notice. +package fieldsort + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Less returns true if field a comes before field j in ordered wire marshal output. 
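The error helpers above (`New`, `Wrap`, and the unstable `"proto: "` prefix) all unwrap to a single package sentinel, so callers detect the error class with `errors.Is` rather than by comparing message strings. A minimal, standalone sketch of that sentinel-plus-`Unwrap` pattern, with invented names (`Err`, `newf`) since the vendored package is internal and cannot be imported directly:

```go
package main

import (
	"errors"
	"fmt"
)

// Err mirrors the package sentinel above: every error produced by the
// (hypothetical) package unwraps to it.
var Err = errors.New("example error")

type prefixError struct{ s string }

func (e *prefixError) Error() string { return "example: " + e.s }
func (e *prefixError) Unwrap() error { return Err }

// newf is an invented stand-in for the New helper shown above.
func newf(format string, args ...interface{}) error {
	return &prefixError{s: fmt.Sprintf(format, args...)}
}

func main() {
	err := fmt.Errorf("decoding response: %w", newf("required field %v not set", "id"))
	// errors.Is walks the Unwrap chain down to the sentinel, so callers never
	// need to compare against the prefix string itself.
	fmt.Println(errors.Is(err, Err)) // true
	fmt.Println(err)
}
```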
+func Less(a, b protoreflect.FieldDescriptor) bool { + ea := a.IsExtension() + eb := b.IsExtension() + oa := a.ContainingOneof() + ob := b.ContainingOneof() + switch { + case ea != eb: + return ea + case oa != nil && ob != nil: + if oa == ob { + return a.Number() < b.Number() + } + return oa.Index() < ob.Index() + case oa != nil && !oa.IsSynthetic(): + return false + case ob != nil && !ob.IsSynthetic(): + return true + default: + return a.Number() < b.Number() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go new file mode 100644 index 00000000..d02d770c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filedesc provides functionality for constructing descriptors. +package filedesc + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder construct a protoreflect.FileDescriptor from the raw descriptor. +type Builder struct { + // GoPackagePath is the Go package path that is invoking this builder. + GoPackagePath string + + // RawDescriptor is the wire-encoded bytes of FileDescriptorProto + // and must be populated. + RawDescriptor []byte + + // NumEnums is the total number of enums declared in the file. + NumEnums int32 + // NumMessages is the total number of messages declared in the file. + // It includes the implicit message declarations for map entries. + NumMessages int32 + // NumExtensions is the total number of extensions declared in the file. + NumExtensions int32 + // NumServices is the total number of services declared in the file. + NumServices int32 + + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { + preg.ExtensionTypeResolver + } + + // FileRegistry is use to lookup file, enum, and message dependencies. + // Once constructed, the file descriptor is registered here. + // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +} + +// resolverByIndex is an interface Builder.FileRegistry may implement. +// If so, it permits looking up an enum or message dependency based on the +// sub-list and element index into filetype.Builder.DependencyIndexes. +type resolverByIndex interface { + FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor +} + +// Indexes of each sub-list in filetype.Builder.DependencyIndexes. +const ( + listFieldDeps int32 = iota + listExtTargets + listExtDeps + listMethInDeps + listMethOutDeps +) + +// Out is the output of the Builder. +type Out struct { + File pref.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum + // Messages is all message descriptors in "flattened ordering". + // It includes the implicit message declarations for map entries. 
+ Messages []Message + // Extensions is all extension descriptors in "flattened ordering". + Extensions []Extension + // Service is all service descriptors in "flattened ordering". + Services []Service +} + +// Build constructs a FileDescriptor given the parameters set in Builder. +// It assumes that the inputs are well-formed and panics if any inconsistencies +// are encountered. +// +// If NumEnums+NumMessages+NumExtensions+NumServices is zero, +// then Build automatically derives them from the raw descriptor. +func (db Builder) Build() (out Out) { + // Populate the counts if uninitialized. + if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { + db.unmarshalCounts(db.RawDescriptor, true) + } + + // Initialize resolvers and registries if unpopulated. + if db.TypeResolver == nil { + db.TypeResolver = preg.GlobalTypes + } + if db.FileRegistry == nil { + db.FileRegistry = preg.GlobalFiles + } + + fd := newRawFile(db) + out.File = fd + out.Enums = fd.allEnums + out.Messages = fd.allMessages + out.Extensions = fd.allExtensions + out.Services = fd.allServices + + if err := db.FileRegistry.RegisterFile(fd); err != nil { + panic(err) + } + return out +} + +// unmarshalCounts counts the number of enum, message, extension, and service +// declarations in the raw message, which is either a FileDescriptorProto +// or a MessageDescriptorProto depending on whether isFile is set. +func (db *Builder) unmarshalCounts(b []byte, isFile bool) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + if isFile { + switch num { + case genid.FileDescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.FileDescriptorProto_Extension_field_number: + db.NumExtensions++ + case genid.FileDescriptorProto_Service_field_number: + db.NumServices++ + } + } else { + switch num { + case genid.DescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.DescriptorProto_NestedType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.DescriptorProto_Extension_field_number: + db.NumExtensions++ + } + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go new file mode 100644 index 00000000..9385126f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -0,0 +1,614 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// The types in this file may have a suffix: +// • L0: Contains fields common to all descriptors (except File) and +// must be initialized up front. +// • L1: Contains fields specific to a descriptor and +// must be initialized up front. 
+// • L2: Contains fields that are lazily initialized when constructing +// from the raw file descriptor. When constructing as a literal, the L2 +// fields must be initialized up front. +// +// The types are exported so that packages like reflect/protodesc can +// directly construct descriptors. + +type ( + File struct { + fileRaw + L1 FileL1 + + once uint32 // atomically set if L2 is valid + mu sync.Mutex // protects L2 + L2 *FileL2 + } + FileL1 struct { + Syntax pref.Syntax + Path string + Package pref.FullName + + Enums Enums + Messages Messages + Extensions Extensions + Services Services + } + FileL2 struct { + Options func() pref.ProtoMessage + Imports FileImports + Locations SourceLocations + } +) + +func (fd *File) ParentFile() pref.FileDescriptor { return fd } +func (fd *File) Parent() pref.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } +func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() pref.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() pref.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File +} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() pref.FullName { return fd.L1.Package } +func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(pref.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + +func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { + fd.lazyInitOnce() + } + return fd.L2 +} + +func (fd *File) lazyInitOnce() { + fd.mu.Lock() + if fd.L2 == nil { + fd.lazyRawInit() // recursively initializes all L2 structures + } + atomic.StoreUint32(&fd.once, 1) + fd.mu.Unlock() +} + +// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code +// to be able to retrieve the raw descriptor. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *File) ProtoLegacyRawDesc() []byte { + return fd.builder.RawDescriptor +} + +// GoPackagePath is a pseudo-internal API for determining the Go package path +// that this file descriptor is declared in. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. 
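The `File` descriptor above keeps the cheap data (path, package, top-level declaration lists) in `L1` and defers everything else to an `L2` struct built on first use via `lazyInit`. A sketch of how that split is visible through the public API, assuming a program that links in any generated descriptor (the well-known `duration.proto` is used here purely as an example):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers duration.proto at init
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/duration.proto")
	if err != nil {
		panic(err)
	}
	// Path and Package are served from the eagerly built L1 data; asking for
	// Imports (or Options, or SourceLocations) is what triggers the lazy L2
	// unmarshal implemented above.
	fmt.Println(fd.Path(), fd.Package(), fd.Imports().Len())
}
```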
+func (fd *File) GoPackagePath() string { + return fd.builder.GoPackagePath +} + +type ( + Enum struct { + Base + L1 EnumL1 + L2 *EnumL2 // protected by fileDesc.once + } + EnumL1 struct { + eagerValues bool // controls whether EnumL2.Values is already populated + } + EnumL2 struct { + Options func() pref.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges + } + + EnumValue struct { + Base + L1 EnumValueL1 + } + EnumValueL1 struct { + Options func() pref.ProtoMessage + Number pref.EnumNumber + } +) + +func (ed *Enum) Options() pref.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum +} +func (ed *Enum) Values() pref.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values +} +func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 +} + +func (ed *EnumValue) Options() pref.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue +} +func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} + +type ( + Message struct { + Base + L1 MessageL1 + L2 *MessageL2 // protected by fileDesc.once + } + MessageL1 struct { + Enums Enums + Messages Messages + Extensions Extensions + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions + } + MessageL2 struct { + Options func() pref.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges + ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + } + + Field struct { + Base + L1 FieldL1 + } + FieldL1 struct { + Options func() pref.ProtoMessage + Number pref.FieldNumber + Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers + Kind pref.Kind + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } + + Oneof struct { + Base + L1 OneofL1 + } + OneofL1 struct { + Options func() pref.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + } +) + +func (md *Message) Options() pref.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message +} +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) 
Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange +} +func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(pref.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +} + +// IsMessageSet is a pseudo-internal API for checking whether a message +// should serialize in the proto1 message format. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet +} + +func (fd *Field) Options() pref.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field +} +func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } +func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasPresence() bool { + return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} +func (fd *Field) IsPacked() bool { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + switch fd.L1.Kind { + case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + default: + return true + } + } + return fd.L1.IsPacked +} +func (fd *Field) IsExtension() bool { return false } +func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +func (fd *Field) MapKey() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) +} +func (fd *Field) MapValue() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) +} +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd 
*Field) ContainingMessage() pref.MessageDescriptor { + return fd.L0.Parent.(pref.MessageDescriptor) +} +func (fd *Field) Enum() pref.EnumDescriptor { + return fd.L1.Enum +} +func (fd *Field) Message() pref.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + } + return fd.L1.Message +} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(pref.FieldDescriptor) {} + +// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 +// validation for the string field. This exists for Google-internal use only +// since proto3 did not enforce UTF-8 validity prior to the open-source release. +// If this method does not exist, the default is to enforce valid UTF-8. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *Field) EnforceUTF8() bool { + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } + return fd.L0.ParentFile.L1.Syntax == pref.Proto3 +} + +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} +func (od *Oneof) Options() pref.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof +} +func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(pref.OneofDescriptor) {} + +type ( + Extension struct { + Base + L1 ExtensionL1 + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { + Number pref.FieldNumber + Extendee pref.MessageDescriptor + Cardinality pref.Cardinality + Kind pref.Kind + } + ExtensionL2 struct { + Options func() pref.ProtoMessage + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } +) + +func (xd *Extension) Options() pref.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field +} +func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } +func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } +func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() pref.Value { return 
xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } +func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(pref.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} +func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +} + +type ( + Service struct { + Base + L1 ServiceL1 + L2 *ServiceL2 // protected by fileDesc.once + } + ServiceL1 struct{} + ServiceL2 struct { + Options func() pref.ProtoMessage + Methods Methods + } + + Method struct { + Base + L1 MethodL1 + } + MethodL1 struct { + Options func() pref.ProtoMessage + Input pref.MessageDescriptor + Output pref.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } +) + +func (sd *Service) Options() pref.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service +} +func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(pref.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 +} + +func (md *Method) Options() pref.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method +} +func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(pref.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + +// Surrogate files are can be used to create standalone descriptors +// where the syntax is only information derived from the parent file. 
+var ( + SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} +) + +type ( + Base struct { + L0 BaseL0 + } + BaseL0 struct { + FullName pref.FullName // must be populated + ParentFile *File // must be populated + Parent pref.Descriptor + Index int + } +) + +func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() pref.FullName { return d.L0.FullName } +func (d *Base) ParentFile() pref.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile +} +func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Index() int { return d.L0.Index } +func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) IsPlaceholder() bool { return false } +func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +type jsonName struct { + has bool + once sync.Once + name string +} + +// Init initializes the name. It is exported for use by other internal packages. +func (js *jsonName) Init(s string) { + js.has = true + js.name = s +} + +func (js *jsonName) get(fd pref.FieldDescriptor) string { + if !js.has { + js.once.Do(func() { + js.name = strs.JSONCamelCase(string(fd.Name())) + }) + } + return js.name +} + +func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect + // accidental mutations of the original value. + dv.bytes = append([]byte(nil), b...) + } + return dv +} + +func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { + var evs pref.EnumValueDescriptors + if k == pref.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { + evs = &e.L2.Values + } else { + evs = ed.Values() + } + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. + if ed.IsPlaceholder() && pref.Name(b).IsValid() { + v := pref.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + return DefaultValue(v, ev) + } + } + + v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) + if err != nil { + panic(err) + } + return DefaultValue(v, ev) +} + +type defaultValue struct { + has bool + val pref.Value + enum pref.EnumValueDescriptor + bytes []byte +} + +func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { + // Return the zero value as the default if unpopulated. 
+ if !dv.has { + if fd.Cardinality() == pref.Repeated { + return pref.Value{} + } + switch fd.Kind() { + case pref.BoolKind: + return pref.ValueOfBool(false) + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + return pref.ValueOfInt32(0) + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return pref.ValueOfInt64(0) + case pref.Uint32Kind, pref.Fixed32Kind: + return pref.ValueOfUint32(0) + case pref.Uint64Kind, pref.Fixed64Kind: + return pref.ValueOfUint64(0) + case pref.FloatKind: + return pref.ValueOfFloat32(0) + case pref.DoubleKind: + return pref.ValueOfFloat64(0) + case pref.StringKind: + return pref.ValueOfString("") + case pref.BytesKind: + return pref.ValueOfBytes(nil) + case pref.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { + return pref.ValueOfEnum(evs.Get(0).Number()) + } + return pref.ValueOfEnum(0) + } + } + + if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { + // TODO: Avoid panic if we're running with the race detector + // and instead spawn a goroutine that periodically resets + // this value back to the original to induce a race. + panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) + } + return dv.val +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go new file mode 100644 index 00000000..66e1fee5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -0,0 +1,471 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// fileRaw is a data struct used when initializing a file descriptor from +// a raw FileDescriptorProto. +type fileRaw struct { + builder Builder + allEnums []Enum + allMessages []Message + allExtensions []Extension + allServices []Service +} + +func newRawFile(db Builder) *File { + fd := &File{fileRaw: fileRaw{builder: db}} + fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) + fd.unmarshalSeed(db.RawDescriptor) + + // Extended message targets are eagerly resolved since registration + // needs this information at program init time. + for i := range fd.allExtensions { + xd := &fd.allExtensions[i] + xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) + } + + fd.checkDecls() + return fd +} + +// initDecls pre-allocates slices for the exact number of enums, messages +// (including map entries), extensions, and services declared in the proto file. +// This is done to avoid regrowing the slice, which would change the address +// for any previously seen declaration. +// +// The alloc methods "allocates" slices by pulling from the capacity. 
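A toy, self-contained sketch of the allocate-from-capacity pattern described in the comment above and implemented by the `alloc*` helpers that follow (names invented for illustration): one backing slice is sized up front from the counts, and each allocation hands out a stable sub-slice of it, so pointers into earlier allocations are never invalidated by later ones:

```go
package main

import "fmt"

// pool is a miniature analogue of the pre-counted descriptor slices: the
// backing array is created once with the final capacity, and alloc slices
// pieces out of it without ever reallocating.
type pool struct{ all []int }

func newPool(total int) *pool { return &pool{all: make([]int, 0, total)} }

func (p *pool) alloc(n int) []int {
	used := len(p.all)
	out := p.all[used : used+n] // panics if the up-front count was too small
	p.all = p.all[:used+n]
	return out
}

func main() {
	p := newPool(5)
	a := p.alloc(2)
	b := p.alloc(3)
	a[0], b[0] = 1, 2
	// Both a and b alias the same backing array, and their addresses stay
	// fixed because the array never grows.
	fmt.Println(len(p.all), cap(p.all), a, b) // 5 5 [1 0] [2 0 0]
}
```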
+func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { + fd.allEnums = make([]Enum, 0, numEnums) + fd.allMessages = make([]Message, 0, numMessages) + fd.allExtensions = make([]Extension, 0, numExtensions) + fd.allServices = make([]Service, 0, numServices) +} + +func (fd *File) allocEnums(n int) []Enum { + total := len(fd.allEnums) + es := fd.allEnums[total : total+n] + fd.allEnums = fd.allEnums[:total+n] + return es +} +func (fd *File) allocMessages(n int) []Message { + total := len(fd.allMessages) + ms := fd.allMessages[total : total+n] + fd.allMessages = fd.allMessages[:total+n] + return ms +} +func (fd *File) allocExtensions(n int) []Extension { + total := len(fd.allExtensions) + xs := fd.allExtensions[total : total+n] + fd.allExtensions = fd.allExtensions[:total+n] + return xs +} +func (fd *File) allocServices(n int) []Service { + total := len(fd.allServices) + xs := fd.allServices[total : total+n] + fd.allServices = fd.allServices[:total+n] + return xs +} + +// checkDecls performs a sanity check that the expected number of expected +// declarations matches the number that were found in the descriptor proto. +func (fd *File) checkDecls() { + switch { + case len(fd.allEnums) != cap(fd.allEnums): + case len(fd.allMessages) != cap(fd.allMessages): + case len(fd.allExtensions) != cap(fd.allExtensions): + case len(fd.allServices) != cap(fd.allServices): + default: + return + } + panic("mismatching cardinality") +} + +func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Syntax_field_number: + switch string(v) { + case "proto2": + fd.L1.Syntax = pref.Proto2 + case "proto3": + fd.L1.Syntax = pref.Proto3 + default: + panic("invalid syntax") + } + case genid.FileDescriptorProto_Name_field_number: + fd.L1.Path = sb.MakeString(v) + case genid.FileDescriptorProto_Package_field_number: + fd.L1.Package = pref.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { + if numServices > 0 { + panic("non-contiguous repeated field") + } + posServices = len(b0) - len(b) - n - m + } + numServices++ + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // If syntax is missing, it is assumed to be proto2. 
+ if fd.L1.Syntax == 0 { + fd.L1.Syntax = pref.Proto2 + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". + if numEnums > 0 { + fd.L1.Enums.List = fd.allocEnums(numEnums) + } + if numMessages > 0 { + fd.L1.Messages.List = fd.allocMessages(numMessages) + } + if numExtensions > 0 { + fd.L1.Extensions.List = fd.allocExtensions(numExtensions) + } + if numServices > 0 { + fd.L1.Services.List = fd.allocServices(numServices) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range fd.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range fd.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range fd.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numServices > 0 { + b := b0[posServices:] + for i := range fd.L1.Services.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } +} + +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i + + var numValues int + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Name_field_number: + ed.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.EnumDescriptorProto_Value_field_number: + numValues++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + // Only construct enum value descriptors for top-level enums since + // they are needed for registration. 
+ if pd != pf { + return + } + ed.L1.eagerValues = true + ed.L2 = new(EnumL2) + ed.L2.Values.List = make([]EnumValue, numValues) + for i := 0; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) + i++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalSeedOptions(v) + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
+ if numEnums > 0 { + md.L1.Enums.List = pf.allocEnums(numEnums) + } + if numMessages > 0 { + md.L1.Messages.List = pf.allocMessages(numMessages) + } + if numExtensions > 0 { + md.L1.Extensions.List = pf.allocExtensions(numExtensions) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range md.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range md.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range md.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } +} + +func (md *Message) unmarshalSeedOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + xd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + xd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + xd.L1.Kind = pref.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + xd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_Extendee_field_number: + xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Name_field_number: + sd.L0.FullName = appendFullName(sb, pd.FullName(), v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +var nameBuilderPool = sync.Pool{ + New: func() interface{} { return new(strs.Builder) }, +} + +func getBuilder() *strs.Builder { + return nameBuilderPool.Get().(*strs.Builder) +} +func putBuilder(b *strs.Builder) { + nameBuilderPool.Put(b) +} + +// makeFullName converts b to a protoreflect.FullName, +// where b must start with a leading dot. +func makeFullName(sb *strs.Builder, b []byte) pref.FullName { + if len(b) == 0 || b[0] != '.' 
{ + panic("name reference must be fully qualified") + } + return pref.FullName(sb.MakeString(b[1:])) +} + +func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { + return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go new file mode 100644 index 00000000..e672233e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -0,0 +1,704 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func (fd *File) lazyRawInit() { + fd.unmarshalFull(fd.builder.RawDescriptor) + fd.resolveMessages() + fd.resolveExtensions() + fd.resolveServices() +} + +func (file *File) resolveMessages() { + var depIdx int32 + for i := range file.allMessages { + md := &file.allMessages[i] + + // Resolve message field dependencies. + for j := range md.L2.Fields.List { + fd := &md.L2.Fields.List[j] + + // Weak fields are resolved upon actual use. + if fd.L1.IsWeak { + continue + } + + // Resolve message field dependency. + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := fd.L1.Default.val; v.IsValid() { + fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) + } + } + } +} + +func (file *File) resolveExtensions() { + var depIdx int32 + for i := range file.allExtensions { + xd := &file.allExtensions[i] + + // Resolve extension field dependency. + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := xd.L2.Default.val; v.IsValid() { + xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) + } + } +} + +func (file *File) resolveServices() { + var depIdx int32 + for i := range file.allServices { + sd := &file.allServices[i] + + // Resolve method dependencies. 
+ for j := range sd.L2.Methods.List { + md := &sd.L2.Methods.List[j] + md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) + md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) + depIdx++ + } + } +} + +func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { + return ed2 + } + } + for i := range file.allEnums { + if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { + return ed2 + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { + return d.(pref.EnumDescriptor) + } + return ed +} + +func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { + return md2 + } + } + for i := range file.allMessages { + if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { + return md2 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + return md +} + +func (fd *File) unmarshalFull(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var enumIdx, messageIdx, extensionIdx, serviceIdx int + var rawOptions []byte + fd.L2 = new(FileL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_PublicDependency_field_number: + fd.L2.Imports[v].IsPublic = true + case genid.FileDescriptorProto_WeakDependency_field_number: + fd.L2.Imports[v].IsWeak = true + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Dependency_field_number: + path := sb.MakeString(v) + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_EnumType_field_number: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.FileDescriptorProto_MessageType_field_number: + fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.FileDescriptorProto_Extension_field_number: + fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.FileDescriptorProto_Service_field_number: + fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) + serviceIdx++ + case genid.FileDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) +} + +func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + var rawValues [][]byte + var rawOptions []byte + if !ed.L1.eagerValues { + ed.L2 = new(EnumL2) + } + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + rawValues = append(rawValues, v) + case genid.EnumDescriptorProto_ReservedName_field_number: + ed.L2.ReservedNames.List = 
append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.EnumDescriptorProto_ReservedRange_field_number: + ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case genid.EnumDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if !ed.L1.eagerValues && len(rawValues) > 0 { + ed.L2.Values.List = make([]EnumValue, len(rawValues)) + for i, b := range rawValues { + ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) + } + } + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) +} + +func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: + r[0] = pref.EnumNumber(v) + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: + r[1] = pref.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Number_field_number: + vd.L1.Number = pref.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Name_field_number: + // NOTE: Enum values are in the same scope as the enum parent. 
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) + case genid.EnumValueDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) +} + +func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + var rawFields, rawOneofs [][]byte + var enumIdx, messageIdx, extensionIdx int + var rawOptions []byte + md.L2 = new(MessageL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Field_field_number: + rawFields = append(rawFields, v) + case genid.DescriptorProto_OneofDecl_field_number: + rawOneofs = append(rawOneofs, v) + case genid.DescriptorProto_ReservedName_field_number: + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.DescriptorProto_ReservedRange_field_number: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case genid.DescriptorProto_ExtensionRange_field_number: + r, rawOptions := unmarshalMessageExtensionRange(v) + opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) + case genid.DescriptorProto_EnumType_field_number: + md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.DescriptorProto_NestedType_field_number: + md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.DescriptorProto_Extension_field_number: + md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawFields) > 0 || len(rawOneofs) > 0 { + md.L2.Fields.List = make([]Field, len(rawFields)) + md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + if fd.L1.Cardinality == pref.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } + for i, b := range rawOneofs { + od := &md.L2.Oneofs.List[i] + od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + } + } + md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) +} + +func (md *Message) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ReservedRange_Start_field_number: + 
r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ReservedRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Start_field_number: + r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ExtensionRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r, rawOptions +} + +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i + + var rawTypeName []byte + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + fd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + fd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + fd.L1.Kind = pref.Kind(v) + case genid.FieldDescriptorProto_OneofIndex_field_number: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either + // of them. This ensures pointers to slice elements are stable. 
+ od := &pd.(*Message).L2.Oneofs.List[v] + od.L1.Fields.List = append(od.L1.Fields.List, fd) + if fd.L1.ContainingOneof != nil { + panic("oneof type already set") + } + fd.L1.ContainingOneof = od + case genid.FieldDescriptorProto_Proto3Optional_field_number: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + fd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.JSONName.Init(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + fd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } + fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (fd *Field) unmarshalOptions(b []byte) { + const FieldOptions_EnforceUTF8 = 13 + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + fd.L1.HasPacked = true + fd.L1.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Weak_field_number: + fd.L1.IsWeak = protowire.DecodeBool(v) + case FieldOptions_EnforceUTF8: + fd.L1.HasEnforceUTF8 = true + fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.OneofDescriptorProto_Name_field_number: + od.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.OneofDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) +} + +func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + var rawTypeName []byte + var rawOptions []byte + xd.L2 = new(ExtensionL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Proto3Optional_field_number: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.JSONName.Init(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; 
later resolved in resolveExtensions + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } + xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L2.IsPacked = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + var rawMethods [][]byte + var rawOptions []byte + sd.L2 = new(ServiceL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Method_field_number: + rawMethods = append(rawMethods, v) + case genid.ServiceDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawMethods) > 0 { + sd.L2.Methods.List = make([]Method, len(rawMethods)) + for i, b := range rawMethods { + sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) + } + } + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) +} + +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_ClientStreaming_field_number: + md.L1.IsStreamingClient = protowire.DecodeBool(v) + case genid.MethodDescriptorProto_ServerStreaming_field_number: + md.L1.IsStreamingServer = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.MethodDescriptorProto_InputType_field_number: + md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_OutputType_field_number: + md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) +} + +// appendOptions appends src to dst, where the returned slice is never nil. +// This is necessary to distinguish between empty and unpopulated options. +func appendOptions(dst, src []byte) []byte { + if dst == nil { + dst = []byte{} + } + return append(dst, src...) 
+} + +// optionsUnmarshaler constructs a lazy unmarshal function for an options message. +// +// The type of message to unmarshal to is passed as a pointer since the +// vars in descopts may not yet be populated at the time this function is called. +func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { + if b == nil { + return nil + } + var opts pref.ProtoMessage + var once sync.Once + return func() pref.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, + }).Unmarshal(b, opts); err != nil { + panic(err) + } + }) + return opts + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go new file mode 100644 index 00000000..c876cd34 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -0,0 +1,282 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "sort" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type FileImports []pref.FileImport + +func (p *FileImports) Len() int { return len(*p) } +func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + +type Names struct { + List []pref.Name + once sync.Once + has map[pref.Name]int // protected by once +} + +func (p *Names) Len() int { return len(p.List) } +func (p *Names) Get(i int) pref.Name { return p.List[i] } +func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *Names) ProtoInternal(pragma.DoNotImplement) {} +func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } + } + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *Names) CheckValid() error { + for s, n := range p.lazyInit().has { + switch { + case n > 1: + return errors.New("duplicate name: %q", s) + case false && !s.IsValid(): + // NOTE: The C++ implementation does not validate the identifier. + // See https://github.com/protocolbuffers/protobuf/issues/6335. 
+ return errors.New("invalid name: %q", s) + } + } + return nil +} + +type EnumRanges struct { + List [][2]pref.EnumNumber // start inclusive; end inclusive + once sync.Once + sorted [][2]pref.EnumNumber // protected by once +} + +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n pref.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumRanges) lazyInit() *EnumRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *EnumRanges) CheckValid() error { + var rp enumRange + for i, r := range p.lazyInit().sorted { + r := enumRange(r) + switch { + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +type enumRange [2]protoreflect.EnumNumber + +func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive +func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive +func (r enumRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldRanges struct { + List [][2]pref.FieldNumber // start inclusive; end exclusive + once sync.Once + sorted [][2]pref.FieldNumber // protected by once +} + +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n pref.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *FieldRanges) lazyInit() *FieldRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of ranges with an error message +// that completes the sentence: "ranges is invalid because it has ..." 
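+// When isMessageSet is true, field numbers above protowire.MaxValidNumber are
+// also accepted (see isValidFieldNumber below).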
+func (p *FieldRanges) CheckValid(isMessageSet bool) error { + var rp fieldRange + for i, r := range p.lazyInit().sorted { + r := fieldRange(r) + switch { + case !isValidFieldNumber(r.Start(), isMessageSet): + return errors.New("invalid field number: %d", r.Start()) + case !isValidFieldNumber(r.End(), isMessageSet): + return errors.New("invalid field number: %d", r.End()) + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +// isValidFieldNumber reports whether the field number is valid. +// Unlike the FieldNumber.IsValid method, it allows ranges that cover the +// reserved number range. +func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) +} + +// CheckOverlap reports an error if p and q overlap. +func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { + rps := p.lazyInit().sorted + rqs := q.lazyInit().sorted + for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { + rp := fieldRange(rps[pi]) + rq := fieldRange(rqs[qi]) + if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { + return errors.New("overlapping ranges: %v with %v", rp, rq) + } + if rp.Start() < rq.Start() { + pi++ + } else { + qi++ + } + } + return nil +} + +type fieldRange [2]protoreflect.FieldNumber + +func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive +func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive +func (r fieldRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldNumbers struct { + List []pref.FieldNumber + once sync.Once + has map[pref.FieldNumber]struct{} // protected by once +} + +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n pref.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } + } + }) + _, ok := p.has[n] + return ok +} +func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + +type OneofFields struct { + List []pref.FieldDescriptor + once sync.Once + byName map[pref.Name]pref.FieldDescriptor // protected by once + byJSON map[string]pref.FieldDescriptor // protected by once + byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once +} + +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } +func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + +func (p *OneofFields) lazyInit() *OneofFields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]pref.FieldDescriptor, 
len(p.List)) + p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f + p.byJSON[f.JSONName()] = f + p.byNum[f.Number()] = f + } + } + }) + return p +} + +type SourceLocations struct { + List []pref.SourceLocation +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } +func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go new file mode 100644 index 00000000..6a8825e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -0,0 +1,345 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package filedesc + +import ( + "fmt" + "sync" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type Enums struct { + List []Enum + once sync.Once + byName map[protoreflect.Name]*Enum // protected by once +} + +func (p *Enums) Len() int { + return len(p.List) +} +func (p *Enums) Get(i int) protoreflect.EnumDescriptor { + return &p.List[i] +} +func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Enums) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} +func (p *Enums) lazyInit() *Enums { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type EnumValues struct { + List []EnumValue + once sync.Once + byName map[protoreflect.Name]*EnumValue // protected by once + byNum map[protoreflect.EnumNumber]*EnumValue // protected by once +} + +func (p *EnumValues) Len() int { + return len(p.List) +} +func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { + return &p.List[i] +} +func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *EnumValues) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumValues) lazyInit() *EnumValues { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) + p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Messages struct { + List []Message + once sync.Once + byName map[protoreflect.Name]*Message // protected by once +} + +func (p *Messages) Len() int { + return len(p.List) +} +func (p 
*Messages) Get(i int) protoreflect.MessageDescriptor { + return &p.List[i] +} +func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Messages) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} +func (p *Messages) lazyInit() *Messages { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Message, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Fields struct { + List []Field + once sync.Once + byName map[protoreflect.Name]*Field // protected by once + byJSON map[string]*Field // protected by once + byNum map[protoreflect.FieldNumber]*Field // protected by once +} + +func (p *Fields) Len() int { + return len(p.List) +} +func (p *Fields) Get(i int) protoreflect.FieldDescriptor { + return &p.List[i] +} +func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byJSON[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *Fields) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} +func (p *Fields) lazyInit() *Fields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Field, len(p.List)) + p.byJSON = make(map[string]*Field, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byJSON[d.JSONName()]; !ok { + p.byJSON[d.JSONName()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Oneofs struct { + List []Oneof + once sync.Once + byName map[protoreflect.Name]*Oneof // protected by once +} + +func (p *Oneofs) Len() int { + return len(p.List) +} +func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { + return &p.List[i] +} +func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Oneofs) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} +func (p *Oneofs) lazyInit() *Oneofs { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Extensions struct { + List []Extension + once sync.Once + byName map[protoreflect.Name]*Extension // protected by once +} + +func (p *Extensions) Len() int { + return len(p.List) +} +func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { + return &p.List[i] +} +func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Extensions) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p 
*Extensions) ProtoInternal(pragma.DoNotImplement) {} +func (p *Extensions) lazyInit() *Extensions { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Services struct { + List []Service + once sync.Once + byName map[protoreflect.Name]*Service // protected by once +} + +func (p *Services) Len() int { + return len(p.List) +} +func (p *Services) Get(i int) protoreflect.ServiceDescriptor { + return &p.List[i] +} +func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Services) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Services) ProtoInternal(pragma.DoNotImplement) {} +func (p *Services) lazyInit() *Services { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Service, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Methods struct { + List []Method + once sync.Once + byName map[protoreflect.Name]*Method // protected by once +} + +func (p *Methods) Len() int { + return len(p.List) +} +func (p *Methods) Get(i int) protoreflect.MethodDescriptor { + return &p.List[i] +} +func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Methods) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} +func (p *Methods) lazyInit() *Methods { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Method, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go new file mode 100644 index 00000000..dbf2c605 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + emptyNames = new(Names) + emptyEnumRanges = new(EnumRanges) + emptyFieldRanges = new(FieldRanges) + emptyFieldNumbers = new(FieldNumbers) + emptySourceLocations = new(SourceLocations) + + emptyFiles = new(FileImports) + emptyMessages = new(Messages) + emptyFields = new(Fields) + emptyOneofs = new(Oneofs) + emptyEnums = new(Enums) + emptyEnumValues = new(EnumValues) + emptyExtensions = new(Extensions) + emptyServices = new(Services) +) + +// PlaceholderFile is a placeholder, representing only the file path. 
+type PlaceholderFile string + +func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } +func (f PlaceholderFile) Parent() pref.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } +func (f PlaceholderFile) Name() pref.Name { return "" } +func (f PlaceholderFile) FullName() pref.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() pref.FullName { return "" } +func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnum is a placeholder, representing only the full name. +type PlaceholderEnum pref.FullName + +func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnumValue is a placeholder, representing only the full name. +type PlaceholderEnumValue pref.FullName + +func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderMessage is a placeholder, representing only the full name. 
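+// Field and extension descriptors reference it for message-typed declarations
+// whose descriptors have not yet been resolved (see Field.unmarshalFull in desc_lazy.go).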
+type PlaceholderMessage pref.FullName + +func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } +func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } +func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } +func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } +func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go new file mode 100644 index 00000000..0a0dd35d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -0,0 +1,297 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filetype provides functionality for wrapping descriptors +// with Go type information. +package filetype + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + fdesc "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs type descriptors from a raw file descriptor +// and associated Go types for each enum and message declaration. +// +// +// Flattened Ordering +// +// The protobuf type system represents declarations as a tree. Certain nodes in +// the tree require us to either associate it with a concrete Go type or to +// resolve a dependency, which is information that must be provided separately +// since it cannot be derived from the file descriptor alone. +// +// However, representing a tree as Go literals is difficult to simply do in a +// space and time efficient way. Thus, we store them as a flattened list of +// objects where the serialization order from the tree-based form is important. 
+// +// The "flattened ordering" is defined as a tree traversal of all enum, message, +// extension, and service declarations using the following algorithm: +// +// def VisitFileDecls(fd): +// for e in fd.Enums: yield e +// for m in fd.Messages: yield m +// for x in fd.Extensions: yield x +// for s in fd.Services: yield s +// for m in fd.Messages: yield from VisitMessageDecls(m) +// +// def VisitMessageDecls(md): +// for e in md.Enums: yield e +// for m in md.Messages: yield m +// for x in md.Extensions: yield x +// for m in md.Messages: yield from VisitMessageDecls(m) +// +// The traversal starts at the root file descriptor and yields each direct +// declaration within each node before traversing into sub-declarations +// that children themselves may have. +type Builder struct { + // File is the underlying file descriptor builder. + File fdesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. + // + // Declarations are Go types generated for enums and messages directly + // declared (not publicly imported) in the proto source file. + // Messages for map entries are accounted for, but represented by nil. + // Enum declarations in "flattened ordering" come first, followed by + // message declarations in "flattened ordering". + // + // Dependencies are Go types for enums or messages referenced by + // message fields (excluding weak fields), for parent extended messages of + // extension fields, for enums or messages referenced by extension fields, + // and for input and output messages referenced by service methods. + // Dependencies must come after declarations, but the ordering of + // dependencies themselves is unspecified. + GoTypes []interface{} + + // DependencyIndexes is an ordered list of indexes into GoTypes for the + // dependencies of messages, extensions, or services. + // + // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: + // 0. Message field dependencies: list of the enum or message type + // referred to by every message field. + // 1. Extension field targets: list of the extended parent message of + // every extension. + // 2. Extension field dependencies: list of the enum or message type + // referred to by every extension field. + // 3. Service method inputs: list of the input message type + // referred to by every service method. + // 4. Service method outputs: list of the output message type + // referred to by every service method. + // + // The offset into DependencyIndexes for the start of each sub-list + // is appended to the end in reverse order. + DependencyIndexes []int32 + + // EnumInfos is a list of enum infos in "flattened ordering". + EnumInfos []pimpl.EnumInfo + + // MessageInfos is a list of message infos in "flattened ordering". + // If provided, the GoType and PBType for each element is populated. + // + // Requirement: len(MessageInfos) == len(Build.Messages) + MessageInfos []pimpl.MessageInfo + + // ExtensionInfos is a list of extension infos in "flattened ordering". + // Each element is initialized and registered with the protoregistry package. + // + // Requirement: len(LegacyExtensions) == len(Build.Extensions) + ExtensionInfos []pimpl.ExtensionInfo + + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. 
+ TypeRegistry interface { + RegisterMessage(pref.MessageType) error + RegisterEnum(pref.EnumType) error + RegisterExtension(pref.ExtensionType) error + } +} + +// Out is the output of the builder. +type Out struct { + File pref.FileDescriptor +} + +func (tb Builder) Build() (out Out) { + // Replace the resolver with one that resolves dependencies by index, + // which is faster and more reliable than relying on the global registry. + if tb.File.FileRegistry == nil { + tb.File.FileRegistry = preg.GlobalFiles + } + tb.File.FileRegistry = &resolverByIndex{ + goTypes: tb.GoTypes, + depIdxs: tb.DependencyIndexes, + fileRegistry: tb.File.FileRegistry, + } + + // Initialize registry if unpopulated. + if tb.TypeRegistry == nil { + tb.TypeRegistry = preg.GlobalTypes + } + + fbOut := tb.File.Build() + out.File = fbOut.File + + // Process enums. + enumGoTypes := tb.GoTypes[:len(fbOut.Enums)] + if len(tb.EnumInfos) != len(fbOut.Enums) { + panic("mismatching enum lengths") + } + if len(fbOut.Enums) > 0 { + for i := range fbOut.Enums { + tb.EnumInfos[i] = pimpl.EnumInfo{ + GoReflectType: reflect.TypeOf(enumGoTypes[i]), + Desc: &fbOut.Enums[i], + } + // Register enum types. + if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil { + panic(err) + } + } + } + + // Process messages. + messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)] + if len(tb.MessageInfos) != len(fbOut.Messages) { + panic("mismatching message lengths") + } + if len(fbOut.Messages) > 0 { + for i := range fbOut.Messages { + if messageGoTypes[i] == nil { + continue // skip map entry + } + + tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i]) + tb.MessageInfos[i].Desc = &fbOut.Messages[i] + + // Register message types. + if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil { + panic(err) + } + } + + // As a special-case for descriptor.proto, + // locally register concrete message type for the options. + if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" { + for i := range fbOut.Messages { + switch fbOut.Messages[i].Name() { + case "FileOptions": + descopts.File = messageGoTypes[i].(pref.ProtoMessage) + case "EnumOptions": + descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + case "EnumValueOptions": + descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + case "MessageOptions": + descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + case "FieldOptions": + descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + case "OneofOptions": + descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + case "ExtensionRangeOptions": + descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + case "ServiceOptions": + descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + case "MethodOptions": + descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + } + } + } + } + + // Process extensions. + if len(tb.ExtensionInfos) != len(fbOut.Extensions) { + panic("mismatching extension lengths") + } + var depIdx int32 + for i := range fbOut.Extensions { + // For enum and message kinds, determine the referent Go type so + // that we can construct their constructors. 
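+ // listExtDeps is the index of the "extension field dependencies"
+ // sub-list within DependencyIndexes (sub-list 2 in the Builder docs above).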
+ const listExtDeps = 2 + var goType reflect.Type + switch fbOut.Extensions[i].L1.Kind { + case pref.EnumKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + case pref.MessageKind, pref.GroupKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + default: + goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind] + } + if fbOut.Extensions[i].IsList() { + goType = reflect.SliceOf(goType) + } + + pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType) + + // Register extension types. + if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil { + panic(err) + } + } + + return out +} + +var goTypeForPBKind = map[pref.Kind]reflect.Type{ + pref.BoolKind: reflect.TypeOf(bool(false)), + pref.Int32Kind: reflect.TypeOf(int32(0)), + pref.Sint32Kind: reflect.TypeOf(int32(0)), + pref.Sfixed32Kind: reflect.TypeOf(int32(0)), + pref.Int64Kind: reflect.TypeOf(int64(0)), + pref.Sint64Kind: reflect.TypeOf(int64(0)), + pref.Sfixed64Kind: reflect.TypeOf(int64(0)), + pref.Uint32Kind: reflect.TypeOf(uint32(0)), + pref.Fixed32Kind: reflect.TypeOf(uint32(0)), + pref.Uint64Kind: reflect.TypeOf(uint64(0)), + pref.Fixed64Kind: reflect.TypeOf(uint64(0)), + pref.FloatKind: reflect.TypeOf(float32(0)), + pref.DoubleKind: reflect.TypeOf(float64(0)), + pref.StringKind: reflect.TypeOf(string("")), + pref.BytesKind: reflect.TypeOf([]byte(nil)), +} + +type depIdxs []int32 + +// Get retrieves the jth element of the ith sub-list. +func (x depIdxs) Get(i, j int32) int32 { + return x[x[int32(len(x))-i-1]+j] +} + +type ( + resolverByIndex struct { + goTypes []interface{} + depIdxs depIdxs + fileRegistry + } + fileRegistry interface { + FindFileByPath(string) (pref.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +) + +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { + return &es[depIdx] + } else { + return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx]) + } +} + +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { + return &ms[depIdx-len(es)] + } else { + return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx]) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go new file mode 100644 index 00000000..58372dd3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flags provides a set of flags controlled by build tags. +package flags + +// ProtoLegacy specifies whether to enable support for legacy functionality +// such as MessageSets, weak fields, and various other obscure behavior +// that is necessary to maintain backwards compatibility with proto1 or +// the pre-release variants of proto2 and proto3. +// +// This is disabled by default unless built with the "protolegacy" tag. +// +// WARNING: The compatibility agreement covers nothing provided by this flag. 
+// As such, functionality may suddenly be removed or changed at our discretion. +const ProtoLegacy = protoLegacy + +// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions. +// +// Lazy extension unmarshaling validates the contents of message-valued +// extension fields at unmarshal time, but defers creating the message +// structure until the extension is first accessed. +const LazyUnmarshalExtensions = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go new file mode 100644 index 00000000..a72995f0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !protolegacy + +package flags + +const protoLegacy = false diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go new file mode 100644 index 00000000..772e2f0e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build protolegacy + +package flags + +const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 00000000..e6f7d47a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 00000000..df8f9185 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. 
+const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. +const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. +const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. 
+const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 00000000..e3cdf1c2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. 
+const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. 
+const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. 
+const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. 
+const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.swift_prefix"
+ FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions. 
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 00000000..45ccd012 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 00000000..b070ef4f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 00000000..762abb34 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 00000000..70bed453 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 00000000..693d2e9e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 00000000..8f9ea02f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 00000000..3e99ae16 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 00000000..1a38944b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 00000000..f5cd5634 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 00000000..3bc71013 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 00000000..429384b8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 00000000..72527d2a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go new file mode 100644 index 00000000..b5974528 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// Export is a zero-length named type that exists only to export a set of +// functions that we do not want to appear in godoc. +type Export struct{} + +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + +// enum is any enum type generated by protoc-gen-go +// and must be a named int32 type. +type enum = interface{} + +// EnumOf returns the protoreflect.Enum interface over e. +// It returns nil if e is nil. +func (Export) EnumOf(e enum) pref.Enum { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) + } +} + +// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. +// It returns nil if e is nil. +func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) + } +} + +// EnumTypeOf returns the protoreflect.EnumType for e. +// It returns nil if e is nil. 
+func (Export) EnumTypeOf(e enum) pref.EnumType { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) + } +} + +// EnumStringOf returns the enum value as a string, either as the name if +// the number is resolvable, or the number formatted as a string. +func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) + } + return strconv.Itoa(int(n)) +} + +// message is any message type generated by protoc-gen-go +// and must be a pointer to a named struct type. +type message = interface{} + +// legacyMessageWrapper wraps a v2 message as a v1 message. +type legacyMessageWrapper struct{ m pref.ProtoMessage } + +func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } +func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +func (m legacyMessageWrapper) ProtoMessage() {} + +// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil + case piface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) + case pref.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +func (Export) protoMessageV2Of(m message) pref.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil + case pref.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m + case piface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv + } + return legacyWrapMessage(reflect.ValueOf(m)).Interface() +} + +// MessageOf returns the protoreflect.Message interface over m. +// It returns nil if m is nil. +func (Export) MessageOf(m message) pref.Message { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect() + } + return legacyWrapMessage(reflect.ValueOf(m)) +} + +// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. +// It returns nil if m is nil. +func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Descriptor() + } + return LegacyLoadMessageDesc(reflect.TypeOf(m)) +} + +// MessageTypeOf returns the protoreflect.MessageType for m. +// It returns nil if m is nil. +func (Export) MessageTypeOf(m message) pref.MessageType { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageInfo(reflect.TypeOf(m), "") +} + +// MessageStringOf returns the message value as a string, +// which is the message serialized in the protobuf text format. 
+func (Export) MessageStringOf(m pref.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go new file mode 100644 index 00000000..b82341e5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) +} + +func (mi *MessageInfo) checkInitializedPointer(p pointer) error { + mi.init() + if !mi.needsInitCheck { + return nil + } + if p.IsNil() { + for _, f := range mi.orderedCoderFields { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + } + return nil + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + if err := mi.isInitExtensions(e); err != nil { + return err + } + } + for _, f := range mi.orderedCoderFields { + if !f.isRequired && f.funcs.isInit == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit == nil { + continue + } + if err := f.funcs.isInit(fptr, f); err != nil { + return err + } + } + return nil +} + +func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { + if ext == nil { + return nil + } + for _, x := range *ext { + ei := getExtensionFieldInfo(x.Type()) + if ei.funcs.isInit == nil { + continue + } + v := x.Value() + if !v.IsValid() { + continue + } + if err := ei.funcs.isInit(v); err != nil { + return err + } + } + return nil +} + +var ( + needsInitCheckMu sync.Mutex + needsInitCheckMap sync.Map +) + +// needsInitCheck reports whether a message needs to be checked for partial initialization. +// +// It returns true if the message transitively includes any required or extension fields. +func needsInitCheck(md pref.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has + } + } + needsInitCheckMu.Lock() + defer needsInitCheckMu.Unlock() + return needsInitCheckLocked(md) +} + +func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. + // + // If has is false, we've previously determined that it can never + // be uninitialized. + // + // If has is not a bool, we've just encountered a cycle in the + // message graph. In this case, it is safe to return false: If + // the message does have required fields, we'll detect them later + // in the graph traversal. 
+ has, ok := v.(bool) + return ok && has + } + needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message + defer func() { + needsInitCheckMap.Store(md, has) + }() + if md.RequiredNumbers().Len() > 0 { + return true + } + if md.ExtensionRanges().Len() > 0 { + return true + } + for i := 0; i < md.Fields().Len(); i++ { + fd := md.Fields().Get(i) + // Map keys are never messages, so just consider the map value. + if fd.IsMap() { + fd = fd.MapValue() + } + fmd := fd.Message() + if fmd != nil && needsInitCheckLocked(fmd) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go new file mode 100644 index 00000000..08d35170 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type extensionFieldInfo struct { + wiretag uint64 + tagsize int + unmarshalNeedsValue bool + funcs valueCoderFuncs + validation validationInfo +} + +var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + +func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info + } + return legacyLoadExtensionFieldInfo(xt) +} + +// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. +func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } + e := makeExtensionFieldInfo(xt.TypeDescriptor()) + if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { + return e.(*extensionFieldInfo) + } + return e +} + +func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) + } else { + wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) + } + e := &extensionFieldInfo{ + wiretag: wiretag, + tagsize: protowire.SizeVarint(wiretag), + funcs: encoderFuncsForValue(xd), + } + // Does the unmarshal function need a value passed to it? + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. + switch xd.Kind() { + case pref.MessageKind, pref.GroupKind, pref.EnumKind: + e.unmarshalNeedsValue = true + default: + if xd.Cardinality() == pref.Repeated { + e.unmarshalNeedsValue = true + } + } + return e +} + +type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo + value pref.Value + b []byte + fn func() pref.Value +} + +type ExtensionField struct { + typ pref.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. 
+ value pref.Value + lazy *lazyExtensionValue +} + +func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } + f.typ = xt + f.lazy.xi = xi + f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) + f.lazy.b = append(f.lazy.b, b...) +} + +func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { + if f.typ == nil { + return true + } + if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + return true + } + return false +} + +func (f *ExtensionField) lazyInit() { + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { + return + } + if f.lazy.xi != nil { + b := f.lazy.b + val := f.typ.New() + for len(b) > 0 { + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + panic(errors.New("bad tag in lazy extension decoding")) + } + b = b[n:] + } + num := protowire.Number(tag >> 3) + wtyp := protowire.Type(tag & 7) + var out unmarshalOutput + var err error + val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) + if err != nil { + panic(errors.New("decode failure in lazy extension decoding: %v", err)) + } + b = b[out.n:] + } + f.lazy.value = val + } else { + f.lazy.value = f.lazy.fn() + } + f.lazy.xi = nil + f.lazy.fn = nil + f.lazy.b = nil + atomic.StoreUint32(&f.lazy.atomicOnce, 1) +} + +// Set sets the type and value of the extension field. +// This must not be called concurrently. +func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { + f.typ = t + f.value = v + f.lazy = nil +} + +// SetLazy sets the type and a value that is to be lazily evaluated upon first use. +// This must not be called concurrently. +func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} +} + +// Value returns the value of the extension field. +// This may be called concurrently. +func (f *ExtensionField) Value() pref.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() + } + return f.lazy.value + } + return f.value +} + +// Type returns the type of the extension field. +// This may be called concurrently. +func (f ExtensionField) Type() pref.ExtensionType { + return f.typ +} + +// IsSet returns whether the extension field is set. +// This may be called concurrently. +func (f ExtensionField) IsSet() bool { + return f.typ != nil +} + +// IsLazy reports whether a field is lazily encoded. +// It is exported for testing. 
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { + case *messageState: + mi = m.messageInfo() + p = m.pointer() + case *messageReflectWrapper: + mi = m.messageInfo() + p = m.pointer() + default: + return false + } + xd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + return false + } + xt := xd.Type() + ext := mi.extensionMap(p) + if ext == nil { + return false + } + f, ok := (*ext)[int32(fd.Number())] + if !ok { + return false + } + return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go new file mode 100644 index 00000000..c00744d3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -0,0 +1,828 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type errInvalidUTF8 struct{} + +func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } +func (errInvalidUTF8) InvalidUTF8() bool { return true } + +// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. +// +// For size, marshal, and isInit operations, functions are set only on the first field +// in the oneof. The functions are called when the oneof is non-nil, and will dispatch +// to the appropriate field-specific function as necessary. +// +// The unmarshal function is set on each field individually as usual. +func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) + needIsInit := false + fields := od.Fields() + for i, lim := 0, fields.Len(); i < lim; i++ { + fd := od.Fields().Get(i) + num := fd.Number() + // Make a copy of the original coderFieldInfo for use in unmarshaling. + // + // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. + // + // mi.coderFields[num].marshal is set on only the first field in the oneof, + // and dispatches to the field-specific marshaler in oneofFields. 
+ cf := *mi.coderFields[num] + ot := si.oneofWrappersByNumber[num] + cf.ft = ot.Field(0).Type + cf.mi, cf.funcs = fieldCoder(fd, cf.ft) + oneofFields[ot] = &cf + if cf.funcs.isInit != nil { + needIsInit = true + } + mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + var vw reflect.Value // pointer to wrapper type + vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind + if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { + vw = vi.Elem() + } else { + vw = reflect.New(ot) + } + out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) + if err != nil { + return out, err + } + vi.Set(vw) + return out, nil + } + } + getInfo := func(p pointer) (pointer, *coderFieldInfo) { + v := p.AsValueOf(ft).Elem() + if v.IsNil() { + return pointer{}, nil + } + v = v.Elem() // interface -> *struct + if v.IsNil() { + return pointer{}, nil + } + return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] + } + first := mi.coderFields[od.Fields().Get(0).Number()] + first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { + p, info := getInfo(p) + if info == nil || info.funcs.size == nil { + return 0 + } + return info.funcs.size(p, info, opts) + } + first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { + p, info := getInfo(p) + if info == nil || info.funcs.marshal == nil { + return b, nil + } + return info.funcs.marshal(b, p, info, opts) + } + first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { + srcp, srcinfo := getInfo(src) + if srcinfo == nil || srcinfo.funcs.merge == nil { + return + } + dstp, dstinfo := getInfo(dst) + if dstinfo != srcinfo { + dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) + dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) + } + srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) + } + if needIsInit { + first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { + p, info := getInfo(p) + if info == nil || info.funcs.isInit == nil { + return nil + } + return info.funcs.isInit(p, info) + } + } +} + +func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + }) + } + + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m, ok := p.WeakFields().get(f.num) + if !ok { + return 0 + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m, ok := p.WeakFields().get(f.num) + if !ok { + return b, nil + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + fs := p.WeakFields() + m, ok := fs.get(f.num) + if !ok { + lazyInit() + if messageType == nil { + return unmarshalOutput{}, errUnknown + } + m = 
messageType.New().Interface() + fs.set(f.num, m) + } + return consumeMessage(b, m, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m, ok := p.WeakFields().get(f.num) + if !ok { + return nil + } + return proto.CheckInitialized(m) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + sm, ok := src.WeakFields().get(f.num) + if !ok { + return + } + dm, ok := dst.WeakFields().get(f.num) + if !ok { + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + dm = messageType.New().Interface() + dst.WeakFields().set(f.num, dm) + } + opts.Merge(dm, sm) + }, + } +} + +func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, + marshal: appendMessageInfo, + unmarshal: consumeMessageInfo, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeMessage(b, asMessage(mp), wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize +} + +func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) + return f.mi.marshalAppendPointer(b, p.Elem(), opts) +} + +func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageInfo(p pointer, f *coderFieldInfo) error { + return f.mi.checkInitializedPointer(p.Elem()) +} + +func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { + return protowire.SizeBytes(proto.Size(m)) + tagsize +} + +func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(proto.Size(m))) + return opts.Options().MarshalAppend(b, m) +} + +func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } 
+ v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) +} + +func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) +} + +func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err +} + +func isInitMessageValue(v pref.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) +} + +var coderMessageValue = valueCoderFuncs{ + size: sizeMessageValue, + marshal: appendMessageValue, + unmarshal: consumeMessageValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) +} + +func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) +} + +func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +} + +var coderGroupValue = valueCoderFuncs{ + size: sizeGroupValue, + marshal: appendGroupValue, + unmarshal: consumeGroupValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupType, + marshal: appendGroupType, + unmarshal: consumeGroupType, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeGroup(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendGroup(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeGroup(b, asMessage(mp), num, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) +} + +func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := 
f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) +} + +func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { + return 2*tagsize + proto.Size(m) +} + +func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) // start group + b, err := opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, wiretag+1) // end group + return b, err +} + +func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, protowire.ParseError(n) + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, + marshal: appendMessageSliceInfo, + unmarshal: consumeMessageSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMessageSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMessageSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeMessageSlice(b, p, ft, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + 
return out, err + } + p.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { + s := p.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func isInitMessageSlice(p pointer, goType reflect.Type) error { + s := p.PointerSlice() + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +// Slices of messages + +func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return pref.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return pref.Value{}, out, protowire.ParseError(n) + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +func isInitMessageSliceValue(listv pref.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := 
list.Get(i).Message().Interface() + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +var coderMessageSliceValue = valueCoderFuncs{ + size: sizeMessageSliceValue, + marshal: appendMessageSliceValue, + unmarshal: consumeMessageSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) // start group + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { + return pref.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return pref.Value{}, out, protowire.ParseError(n) + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +var coderGroupSliceValue = valueCoderFuncs{ + size: sizeGroupSliceValue, + marshal: appendGroupSliceValue, + unmarshal: consumeGroupSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupSliceInfo, + marshal: appendGroupSliceInfo, + unmarshal: consumeGroupSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeGroupSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendGroupSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeGroupSlice(b, p, num, wtyp, ft, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + b = protowire.AppendVarint(b, 
wiretag) // start group + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, protowire.ParseError(n) + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + if wtyp != protowire.StartGroupType { + return unmarshalOutput{}, errUnknown + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + return out, nil +} + +func asMessage(v reflect.Value) pref.ProtoMessage { + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go new file mode 100644 index 00000000..ff198d0a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -0,0 +1,5637 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// sizeBool returns the size of wire encoding a bool pointer as a Bool. +func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bool() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBool wire encodes a bool pointer as a Bool. +func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bool() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBool wire decodes a bool pointer as a Bool. 
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bool() = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBool = pointerCoderFuncs{ + size: sizeBool, + marshal: appendBool, + unmarshal: consumeBool, + merge: mergeBool, +} + +// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. +// The zero value is not encoded. +func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bool() + if v == false { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolNoZero wire encodes a bool pointer as a Bool. +// The zero value is not encoded. +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bool() + if v == false { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +var coderBoolNoZero = pointerCoderFuncs{ + size: sizeBoolNoZero, + marshal: appendBoolNoZero, + unmarshal: consumeBool, + merge: mergeBoolNoZero, +} + +// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. +// It panics if the pointer is nil. +func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.BoolPtr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolPtr wire encodes a *bool pointer as a Bool. +// It panics if the pointer is nil. +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.BoolPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBoolPtr wire decodes a *bool pointer as a Bool. +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.BoolPtr() + if *vp == nil { + *vp = new(bool) + } + **vp = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBoolPtr = pointerCoderFuncs{ + size: sizeBoolPtr, + marshal: appendBoolPtr, + unmarshal: consumeBoolPtr, + merge: mergeBoolPtr, +} + +// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. +func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BoolSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) + } + return size +} + +// appendBoolSlice encodes a []bool pointer as a repeated Bool. 
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BoolSlice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, protowire.DecodeBool(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, protowire.DecodeBool(v)) + out.n = n + return out, nil +} + +var coderBoolSlice = pointerCoderFuncs{ + size: sizeBoolSlice, + marshal: appendBoolSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BoolSlice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +var coderBoolPackedSlice = pointerCoderFuncs{ + size: sizeBoolPackedSlice, + marshal: appendBoolPackedSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolValue returns the size of wire encoding a bool value as a Bool. +func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) +} + +// appendBoolValue encodes a bool value as a Bool. +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + return b, nil +} + +// consumeBoolValue decodes a bool value as a Bool. 
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil +} + +var coderBoolValue = valueCoderFuncs{ + size: sizeBoolValue, + marshal: appendBoolValue, + unmarshal: consumeBoolValue, + merge: mergeScalarValue, +} + +// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return size +} + +// appendBoolSliceValue encodes a []bool value as a repeated Bool. +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + out.n = n + return listv, out, nil +} + +var coderBoolSliceValue = valueCoderFuncs{ + size: sizeBoolSliceValue, + marshal: appendBoolSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +var coderBoolPackedSliceValue = valueCoderFuncs{ + size: sizeBoolPackedSliceValue, + marshal: appendBoolPackedSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeEnumValue returns the size of wire encoding a value as a Enum. +func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Enum())) +} + +// appendEnumValue encodes a value as a Enum. +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + return b, nil +} + +// consumeEnumValue decodes a value as a Enum. +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil +} + +var coderEnumValue = valueCoderFuncs{ + size: sizeEnumValue, + marshal: appendEnumValue, + unmarshal: consumeEnumValue, + merge: mergeScalarValue, +} + +// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Enum())) + } + return size +} + +// appendEnumSliceValue encodes a [] value as a repeated Enum. +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + out.n = n + return listv, out, nil +} + +var coderEnumSliceValue = valueCoderFuncs{ + size: sizeEnumSliceValue, + marshal: appendEnumSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +var coderEnumPackedSliceValue = valueCoderFuncs{ + size: sizeEnumPackedSliceValue, + marshal: appendEnumPackedSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. +func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32 wire encodes a int32 pointer as a Int32. +func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32 wire decodes a int32 pointer as a Int32. 
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderInt32 = pointerCoderFuncs{ + size: sizeInt32, + marshal: appendInt32, + unmarshal: consumeInt32, + merge: mergeInt32, +} + +// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. +// The zero value is not encoded. +func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32NoZero wire encodes a int32 pointer as a Int32. +// The zero value is not encoded. +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt32NoZero = pointerCoderFuncs{ + size: sizeInt32NoZero, + marshal: appendInt32NoZero, + unmarshal: consumeInt32, + merge: mergeInt32NoZero, +} + +// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32Ptr wire encodes a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderInt32Ptr = pointerCoderFuncs{ + size: sizeInt32Ptr, + marshal: appendInt32Ptr, + unmarshal: consumeInt32Ptr, + merge: mergeInt32Ptr, +} + +// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. +func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderInt32Slice = pointerCoderFuncs{ + size: sizeInt32Slice, + marshal: appendInt32Slice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt32PackedSlice = pointerCoderFuncs{ + size: sizeInt32PackedSlice, + marshal: appendInt32PackedSlice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. +func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) +} + +// appendInt32Value encodes a int32 value as a Int32. +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + return b, nil +} + +// consumeInt32Value decodes a int32 value as a Int32. 
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderInt32Value = valueCoderFuncs{ + size: sizeInt32Value, + marshal: appendInt32Value, + unmarshal: consumeInt32Value, + merge: mergeScalarValue, +} + +// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) + } + return size +} + +// appendInt32SliceValue encodes a []int32 value as a repeated Int32. +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderInt32SliceValue = valueCoderFuncs{ + size: sizeInt32SliceValue, + marshal: appendInt32SliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +var coderInt32PackedSliceValue = valueCoderFuncs{ + size: sizeInt32PackedSliceValue, + marshal: appendInt32PackedSliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. +func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32 wire encodes a int32 pointer as a Sint32. +func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32 wire decodes a int32 pointer as a Sint32. +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32 = pointerCoderFuncs{ + size: sizeSint32, + marshal: appendSint32, + unmarshal: consumeSint32, + merge: mergeInt32, +} + +// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. +// The zero value is not encoded. +func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32NoZero wire encodes a int32 pointer as a Sint32. +// The zero value is not encoded. +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +var coderSint32NoZero = pointerCoderFuncs{ + size: sizeSint32NoZero, + marshal: appendSint32NoZero, + unmarshal: consumeSint32, + merge: mergeInt32NoZero, +} + +// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. +// It panics if the pointer is nil. 
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32Ptr = pointerCoderFuncs{ + size: sizeSint32Ptr, + marshal: appendSint32Ptr, + unmarshal: consumeSint32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. +func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return size +} + +// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + out.n = n + return out, nil +} + +var coderSint32Slice = pointerCoderFuncs{ + size: sizeSint32Slice, + marshal: appendSint32Slice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +var coderSint32PackedSlice = pointerCoderFuncs{ + size: sizeSint32PackedSlice, + marshal: appendSint32PackedSlice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. +func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) +} + +// appendSint32Value encodes a int32 value as a Sint32. +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + return b, nil +} + +// consumeSint32Value decodes a int32 value as a Sint32. 
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil +} + +var coderSint32Value = valueCoderFuncs{ + size: sizeSint32Value, + marshal: appendSint32Value, + unmarshal: consumeSint32Value, + merge: mergeScalarValue, +} + +// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return size +} + +// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + out.n = n + return listv, out, nil +} + +var coderSint32SliceValue = valueCoderFuncs{ + size: sizeSint32SliceValue, + marshal: appendSint32SliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +var coderSint32PackedSliceValue = valueCoderFuncs{ + size: sizeSint32PackedSliceValue, + marshal: appendSint32PackedSliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. +func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32 wire encodes a uint32 pointer as a Uint32. +func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32 wire decodes a uint32 pointer as a Uint32. +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint32() = uint32(v) + out.n = n + return out, nil +} + +var coderUint32 = pointerCoderFuncs{ + size: sizeUint32, + marshal: appendUint32, + unmarshal: consumeUint32, + merge: mergeUint32, +} + +// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. +// The zero value is not encoded. +func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. +// The zero value is not encoded. +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderUint32NoZero = pointerCoderFuncs{ + size: sizeUint32NoZero, + marshal: appendUint32NoZero, + unmarshal: consumeUint32, + merge: mergeUint32NoZero, +} + +// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. 
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Uint32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = uint32(v) + out.n = n + return out, nil +} + +var coderUint32Ptr = pointerCoderFuncs{ + size: sizeUint32Ptr, + marshal: appendUint32Ptr, + unmarshal: consumeUint32Ptr, + merge: mergeUint32Ptr, +} + +// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. +func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, uint32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, uint32(v)) + out.n = n + return out, nil +} + +var coderUint32Slice = pointerCoderFuncs{ + size: sizeUint32Slice, + marshal: appendUint32Slice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderUint32PackedSlice = pointerCoderFuncs{ + size: sizeUint32PackedSlice, + marshal: appendUint32PackedSlice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. +func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) +} + +// appendUint32Value encodes a uint32 value as a Uint32. +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + return b, nil +} + +// consumeUint32Value decodes a uint32 value as a Uint32. +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderUint32Value = valueCoderFuncs{ + size: sizeUint32Value, + marshal: appendUint32Value, + unmarshal: consumeUint32Value, + merge: mergeScalarValue, +} + +// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return size +} + +// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderUint32SliceValue = valueCoderFuncs{ + size: sizeUint32SliceValue, + marshal: appendUint32SliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +var coderUint32PackedSliceValue = valueCoderFuncs{ + size: sizeUint32PackedSliceValue, + marshal: appendUint32PackedSliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. +func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64 wire encodes a int64 pointer as a Int64. +func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64 wire decodes a int64 pointer as a Int64. 
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderInt64 = pointerCoderFuncs{ + size: sizeInt64, + marshal: appendInt64, + unmarshal: consumeInt64, + merge: mergeInt64, +} + +// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. +// The zero value is not encoded. +func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64NoZero wire encodes a int64 pointer as a Int64. +// The zero value is not encoded. +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt64NoZero = pointerCoderFuncs{ + size: sizeInt64NoZero, + marshal: appendInt64NoZero, + unmarshal: consumeInt64, + merge: mergeInt64NoZero, +} + +// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64Ptr wire encodes a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderInt64Ptr = pointerCoderFuncs{ + size: sizeInt64Ptr, + marshal: appendInt64Ptr, + unmarshal: consumeInt64Ptr, + merge: mergeInt64Ptr, +} + +// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. +func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderInt64Slice = pointerCoderFuncs{ + size: sizeInt64Slice, + marshal: appendInt64Slice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt64PackedSlice = pointerCoderFuncs{ + size: sizeInt64PackedSlice, + marshal: appendInt64PackedSlice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. +func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Int())) +} + +// appendInt64Value encodes a int64 value as a Int64. +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + return b, nil +} + +// consumeInt64Value decodes a int64 value as a Int64. 
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderInt64Value = valueCoderFuncs{ + size: sizeInt64Value, + marshal: appendInt64Value, + unmarshal: consumeInt64Value, + merge: mergeScalarValue, +} + +// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Int())) + } + return size +} + +// appendInt64SliceValue encodes a []int64 value as a repeated Int64. +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderInt64SliceValue = valueCoderFuncs{ + size: sizeInt64SliceValue, + marshal: appendInt64SliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +var coderInt64PackedSliceValue = valueCoderFuncs{ + size: sizeInt64PackedSliceValue, + marshal: appendInt64PackedSliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. +func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64 wire encodes a int64 pointer as a Sint64. +func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64 wire decodes a int64 pointer as a Sint64. +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64 = pointerCoderFuncs{ + size: sizeSint64, + marshal: appendSint64, + unmarshal: consumeSint64, + merge: mergeInt64, +} + +// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. +// The zero value is not encoded. +func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64NoZero wire encodes a int64 pointer as a Sint64. +// The zero value is not encoded. +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +var coderSint64NoZero = pointerCoderFuncs{ + size: sizeSint64NoZero, + marshal: appendSint64NoZero, + unmarshal: consumeSint64, + merge: mergeInt64NoZero, +} + +// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. +// It panics if the pointer is nil. 
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. +// It panics if the pointer is nil. +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64Ptr = pointerCoderFuncs{ + size: sizeSint64Ptr, + marshal: appendSint64Ptr, + unmarshal: consumeSint64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. +func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return size +} + +// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, protowire.DecodeZigZag(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, protowire.DecodeZigZag(v)) + out.n = n + return out, nil +} + +var coderSint64Slice = pointerCoderFuncs{ + size: sizeSint64Slice, + marshal: appendSint64Slice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. 
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +var coderSint64PackedSlice = pointerCoderFuncs{ + size: sizeSint64PackedSlice, + marshal: appendSint64PackedSlice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. +func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) +} + +// appendSint64Value encodes a int64 value as a Sint64. +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + return b, nil +} + +// consumeSint64Value decodes a int64 value as a Sint64. +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil +} + +var coderSint64Value = valueCoderFuncs{ + size: sizeSint64Value, + marshal: appendSint64Value, + unmarshal: consumeSint64Value, + merge: mergeScalarValue, +} + +// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return size +} + +// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + out.n = n + return listv, out, nil +} + +var coderSint64SliceValue = valueCoderFuncs{ + size: sizeSint64SliceValue, + marshal: appendSint64SliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +var coderSint64PackedSliceValue = valueCoderFuncs{ + size: sizeSint64PackedSliceValue, + marshal: appendSint64PackedSliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. +func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64 wire encodes a uint64 pointer as a Uint64. +func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64 wire decodes a uint64 pointer as a Uint64. 
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderUint64 = pointerCoderFuncs{ + size: sizeUint64, + marshal: appendUint64, + unmarshal: consumeUint64, + merge: mergeUint64, +} + +// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. +// The zero value is not encoded. +func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. +// The zero value is not encoded. +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +var coderUint64NoZero = pointerCoderFuncs{ + size: sizeUint64NoZero, + marshal: appendUint64NoZero, + unmarshal: consumeUint64, + merge: mergeUint64NoZero, +} + +// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Uint64Ptr() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderUint64Ptr = pointerCoderFuncs{ + size: sizeUint64Ptr, + marshal: appendUint64Ptr, + unmarshal: consumeUint64Ptr, + merge: mergeUint64Ptr, +} + +// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. +func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(v) + } + return size +} + +// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. 
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderUint64Slice = pointerCoderFuncs{ + size: sizeUint64Slice, + marshal: appendUint64Slice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +var coderUint64PackedSlice = pointerCoderFuncs{ + size: sizeUint64PackedSlice, + marshal: appendUint64PackedSlice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. +func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(v.Uint()) +} + +// appendUint64Value encodes a uint64 value as a Uint64. +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + return b, nil +} + +// consumeUint64Value decodes a uint64 value as a Uint64. 
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderUint64Value = valueCoderFuncs{ + size: sizeUint64Value, + marshal: appendUint64Value, + unmarshal: consumeUint64Value, + merge: mergeScalarValue, +} + +// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(v.Uint()) + } + return size +} + +// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderUint64SliceValue = valueCoderFuncs{ + size: sizeUint64SliceValue, + marshal: appendUint64SliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +var coderUint64PackedSliceValue = valueCoderFuncs{ + size: sizeUint64PackedSliceValue, + marshal: appendUint64PackedSliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. +func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32 = pointerCoderFuncs{ + size: sizeSfixed32, + marshal: appendSfixed32, + unmarshal: consumeSfixed32, + merge: mergeInt32, +} + +// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +var coderSfixed32NoZero = pointerCoderFuncs{ + size: sizeSfixed32NoZero, + marshal: appendSfixed32NoZero, + unmarshal: consumeSfixed32, + merge: mergeInt32NoZero, +} + +// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. 
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32Ptr = pointerCoderFuncs{ + size: sizeSfixed32Ptr, + marshal: appendSfixed32Ptr, + unmarshal: consumeSfixed32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderSfixed32Slice = pointerCoderFuncs{ + size: sizeSfixed32Slice, + marshal: appendSfixed32Slice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. 
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +var coderSfixed32PackedSlice = pointerCoderFuncs{ + size: sizeSfixed32PackedSlice, + marshal: appendSfixed32PackedSlice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. +func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Value encodes a int32 value as a Sfixed32. +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + return b, nil +} + +// consumeSfixed32Value decodes a int32 value as a Sfixed32. +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderSfixed32Value = valueCoderFuncs{ + size: sizeSfixed32Value, + marshal: appendSfixed32Value, + unmarshal: consumeSfixed32Value, + merge: mergeScalarValue, +} + +// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. 
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed32SliceValue = valueCoderFuncs{ + size: sizeSfixed32SliceValue, + marshal: appendSfixed32SliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +var coderSfixed32PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed32PackedSliceValue, + marshal: appendSfixed32PackedSliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. +func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32 wire encodes a uint32 pointer as a Fixed32. +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint32() = v + out.n = n + return out, nil +} + +var coderFixed32 = pointerCoderFuncs{ + size: sizeFixed32, + marshal: appendFixed32, + unmarshal: consumeFixed32, + merge: mergeUint32, +} + +// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. +// The zero value is not encoded. 
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +var coderFixed32NoZero = pointerCoderFuncs{ + size: sizeFixed32NoZero, + marshal: appendFixed32NoZero, + unmarshal: consumeFixed32, + merge: mergeUint32NoZero, +} + +// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed32Ptr = pointerCoderFuncs{ + size: sizeFixed32Ptr, + marshal: appendFixed32Ptr, + unmarshal: consumeFixed32Ptr, + merge: mergeUint32Ptr, +} + +// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. +func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. 
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed32Slice = pointerCoderFuncs{ + size: sizeFixed32Slice, + marshal: appendFixed32Slice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +var coderFixed32PackedSlice = pointerCoderFuncs{ + size: sizeFixed32PackedSlice, + marshal: appendFixed32PackedSlice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. +func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFixed32Value encodes a uint32 value as a Fixed32. +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + return b, nil +} + +// consumeFixed32Value decodes a uint32 value as a Fixed32. +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderFixed32Value = valueCoderFuncs{ + size: sizeFixed32Value, + marshal: appendFixed32Value, + unmarshal: consumeFixed32Value, + merge: mergeScalarValue, +} + +// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. 
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderFixed32SliceValue = valueCoderFuncs{ + size: sizeFixed32SliceValue, + marshal: appendFixed32SliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +var coderFixed32PackedSliceValue = valueCoderFuncs{ + size: sizeFixed32PackedSliceValue, + marshal: appendFixed32PackedSliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFloat returns the size of wire encoding a float32 pointer as a Float. +func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloat wire encodes a float32 pointer as a Float. +func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloat wire decodes a float32 pointer as a Float. 
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Float32() = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloat = pointerCoderFuncs{ + size: sizeFloat, + marshal: appendFloat, + unmarshal: consumeFloat, + merge: mergeFloat32, +} + +// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. +// The zero value is not encoded. +func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatNoZero wire encodes a float32 pointer as a Float. +// The zero value is not encoded. +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +var coderFloatNoZero = pointerCoderFuncs{ + size: sizeFloatNoZero, + marshal: appendFloatNoZero, + unmarshal: consumeFloat, + merge: mergeFloat32NoZero, +} + +// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. +// It panics if the pointer is nil. +func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatPtr wire encodes a *float32 pointer as a Float. +// It panics if the pointer is nil. +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Float32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloatPtr wire decodes a *float32 pointer as a Float. +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Float32Ptr() + if *vp == nil { + *vp = new(float32) + } + **vp = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloatPtr = pointerCoderFuncs{ + size: sizeFloatPtr, + marshal: appendFloatPtr, + unmarshal: consumeFloatPtr, + merge: mergeFloat32Ptr, +} + +// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. +func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSlice encodes a []float32 pointer as a repeated Float. +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. 
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, math.Float32frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, math.Float32frombits(v)) + out.n = n + return out, nil +} + +var coderFloatSlice = pointerCoderFuncs{ + size: sizeFloatSlice, + marshal: appendFloatSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +var coderFloatPackedSlice = pointerCoderFuncs{ + size: sizeFloatPackedSlice, + marshal: appendFloatPackedSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatValue returns the size of wire encoding a float32 value as a Float. +func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFloatValue encodes a float32 value as a Float. +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + return b, nil +} + +// consumeFloatValue decodes a float32 value as a Float. +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil +} + +var coderFloatValue = valueCoderFuncs{ + size: sizeFloatValue, + marshal: appendFloatValue, + unmarshal: consumeFloatValue, + merge: mergeScalarValue, +} + +// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSliceValue encodes a []float32 value as a repeated Float. 
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + out.n = n + return listv, out, nil +} + +var coderFloatSliceValue = valueCoderFuncs{ + size: sizeFloatSliceValue, + marshal: appendFloatSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +var coderFloatPackedSliceValue = valueCoderFuncs{ + size: sizeFloatPackedSliceValue, + marshal: appendFloatPackedSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. +func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. 
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64 = pointerCoderFuncs{ + size: sizeSfixed64, + marshal: appendSfixed64, + unmarshal: consumeSfixed64, + merge: mergeInt64, +} + +// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +var coderSfixed64NoZero = pointerCoderFuncs{ + size: sizeSfixed64NoZero, + marshal: appendSfixed64NoZero, + unmarshal: consumeSfixed64, + merge: mergeInt64NoZero, +} + +// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64Ptr = pointerCoderFuncs{ + size: sizeSfixed64Ptr, + marshal: appendSfixed64Ptr, + unmarshal: consumeSfixed64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. 
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderSfixed64Slice = pointerCoderFuncs{ + size: sizeSfixed64Slice, + marshal: appendSfixed64Slice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +var coderSfixed64PackedSlice = pointerCoderFuncs{ + size: sizeSfixed64PackedSlice, + marshal: appendSfixed64PackedSlice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. +func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Value encodes a int64 value as a Sfixed64. +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + return b, nil +} + +// consumeSfixed64Value decodes a int64 value as a Sfixed64. +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderSfixed64Value = valueCoderFuncs{ + size: sizeSfixed64Value, + marshal: appendSfixed64Value, + unmarshal: consumeSfixed64Value, + merge: mergeScalarValue, +} + +// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. 
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed64SliceValue = valueCoderFuncs{ + size: sizeSfixed64SliceValue, + marshal: appendSfixed64SliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +var coderSfixed64PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed64PackedSliceValue, + marshal: appendSfixed64PackedSliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. +func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64 wire encodes a uint64 pointer as a Fixed64. +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderFixed64 = pointerCoderFuncs{ + size: sizeFixed64, + marshal: appendFixed64, + unmarshal: consumeFixed64, + merge: mergeUint64, +} + +// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +var coderFixed64NoZero = pointerCoderFuncs{ + size: sizeFixed64NoZero, + marshal: appendFixed64NoZero, + unmarshal: consumeFixed64, + merge: mergeUint64NoZero, +} + +// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed64Ptr = pointerCoderFuncs{ + size: sizeFixed64Ptr, + marshal: appendFixed64Ptr, + unmarshal: consumeFixed64Ptr, + merge: mergeUint64Ptr, +} + +// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. +func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. 
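+// Both the packed (length-delimited) and unpacked fixed64 encodings are accepted; decoders for repeated scalar fields must handle either form.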
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed64Slice = pointerCoderFuncs{ + size: sizeFixed64Slice, + marshal: appendFixed64Slice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +var coderFixed64PackedSlice = pointerCoderFuncs{ + size: sizeFixed64PackedSlice, + marshal: appendFixed64PackedSlice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. +func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendFixed64Value encodes a uint64 value as a Fixed64. +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + return b, nil +} + +// consumeFixed64Value decodes a uint64 value as a Fixed64. +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderFixed64Value = valueCoderFuncs{ + size: sizeFixed64Value, + marshal: appendFixed64Value, + unmarshal: consumeFixed64Value, + merge: mergeScalarValue, +} + +// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. 
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderFixed64SliceValue = valueCoderFuncs{ + size: sizeFixed64SliceValue, + marshal: appendFixed64SliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +var coderFixed64PackedSliceValue = valueCoderFuncs{ + size: sizeFixed64PackedSliceValue, + marshal: appendFixed64PackedSliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeDouble returns the size of wire encoding a float64 pointer as a Double. +func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendDouble wire encodes a float64 pointer as a Double. +func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDouble wire decodes a float64 pointer as a Double. 
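+// The eight little-endian bytes are reinterpreted as an IEEE 754 double via math.Float64frombits.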
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Float64() = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDouble = pointerCoderFuncs{ + size: sizeDouble, + marshal: appendDouble, + unmarshal: consumeDouble, + merge: mergeFloat64, +} + +// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. +// The zero value is not encoded. +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoubleNoZero wire encodes a float64 pointer as a Double. +// The zero value is not encoded. +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +var coderDoubleNoZero = pointerCoderFuncs{ + size: sizeDoubleNoZero, + marshal: appendDoubleNoZero, + unmarshal: consumeDouble, + merge: mergeFloat64NoZero, +} + +// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. +// It panics if the pointer is nil. +func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoublePtr wire encodes a *float64 pointer as a Double. +// It panics if the pointer is nil. +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Float64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDoublePtr wire decodes a *float64 pointer as a Double. +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Float64Ptr() + if *vp == nil { + *vp = new(float64) + } + **vp = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDoublePtr = pointerCoderFuncs{ + size: sizeDoublePtr, + marshal: appendDoublePtr, + unmarshal: consumeDoublePtr, + merge: mergeFloat64Ptr, +} + +// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. +func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSlice encodes a []float64 pointer as a repeated Double. +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. 
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, math.Float64frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, math.Float64frombits(v)) + out.n = n + return out, nil +} + +var coderDoubleSlice = pointerCoderFuncs{ + size: sizeDoubleSlice, + marshal: appendDoubleSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +var coderDoublePackedSlice = pointerCoderFuncs{ + size: sizeDoublePackedSlice, + marshal: appendDoublePackedSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. +func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendDoubleValue encodes a float64 value as a Double. +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + return b, nil +} + +// consumeDoubleValue decodes a float64 value as a Double. +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil +} + +var coderDoubleValue = valueCoderFuncs{ + size: sizeDoubleValue, + marshal: appendDoubleValue, + unmarshal: consumeDoubleValue, + merge: mergeScalarValue, +} + +// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSliceValue encodes a []float64 value as a repeated Double. 
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + out.n = n + return listv, out, nil +} + +var coderDoubleSliceValue = valueCoderFuncs{ + size: sizeDoubleSliceValue, + marshal: appendDoubleSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +var coderDoublePackedSliceValue = valueCoderFuncs{ + size: sizeDoublePackedSliceValue, + marshal: appendDoublePackedSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeString returns the size of wire encoding a string pointer as a String. +func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.String() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendString wire encodes a string pointer as a String. +func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeString wire decodes a string pointer as a String. 
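+// No UTF-8 validation happens here; consumeStringValidateUTF8 below is the proto3 variant that rejects invalid UTF-8.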
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.String() = v + out.n = n + return out, nil +} + +var coderString = pointerCoderFuncs{ + size: sizeString, + marshal: appendString, + unmarshal: consumeString, + merge: mergeString, +} + +// appendStringValidateUTF8 wire encodes a string pointer as a String. +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValidateUTF8 wire decodes a string pointer as a String. +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + *p.String() = v + out.n = n + return out, nil +} + +var coderStringValidateUTF8 = pointerCoderFuncs{ + size: sizeString, + marshal: appendStringValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeString, +} + +// sizeStringNoZero returns the size of wire encoding a string pointer as a String. +// The zero value is not encoded. +func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.String() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringNoZero wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +var coderStringNoZero = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZero, + unmarshal: consumeString, + merge: mergeStringNoZero, +} + +// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZeroValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeStringNoZero, +} + +// sizeStringPtr returns the size of wire encoding a *string pointer as a String. +// It panics if the pointer is nil. +func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.StringPtr() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringPtr wire encodes a *string pointer as a String. +// It panics if the pointer is nil. 
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeStringPtr wire decodes a *string pointer as a String. +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtr = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtr, + unmarshal: consumeStringPtr, + merge: mergeStringPtr, +} + +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + +// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. +func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.StringSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendStringSlice encodes a []string pointer as a repeated String. +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + } + return b, nil +} + +// consumeStringSlice wire decodes a []string pointer as a repeated String. +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderStringSlice = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSlice, + unmarshal: consumeStringSlice, + merge: mergeStringSlice, +} + +// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. 
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderStringSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSliceValidateUTF8, + unmarshal: consumeStringSliceValidateUTF8, + merge: mergeStringSlice, +} + +// sizeStringValue returns the size of wire encoding a string value as a String. +func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.String())) +} + +// appendStringValue encodes a string value as a String. +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + return b, nil +} + +// consumeStringValue decodes a string value as a String. +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValue = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValue, + unmarshal: consumeStringValue, + merge: mergeScalarValue, +} + +// appendStringValueValidateUTF8 encodes a string value as a String. +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + if !utf8.ValidString(v.String()) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValueValidateUTF8 decodes a string value as a String. +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return protoreflect.Value{}, out, errInvalidUTF8{} + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValueValidateUTF8 = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValueValidateUTF8, + unmarshal: consumeStringValueValidateUTF8, + merge: mergeScalarValue, +} + +// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. 
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.String())) + } + return size +} + +// appendStringSliceValue encodes a []string value as a repeated String. +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + } + return b, nil +} + +// consumeStringSliceValue wire decodes a []string value as a repeated String. +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfString(string(v))) + out.n = n + return listv, out, nil +} + +var coderStringSliceValue = valueCoderFuncs{ + size: sizeStringSliceValue, + marshal: appendStringSliceValue, + unmarshal: consumeStringSliceValue, + merge: mergeListValue, +} + +// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. +func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bytes() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytes wire encodes a []byte pointer as a Bytes. +func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytes wire decodes a []byte pointer as a Bytes. +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bytes() = append(emptyBuf[:], v...) + out.n = n + return out, nil +} + +var coderBytes = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytes, + unmarshal: consumeBytes, + merge: mergeBytes, +} + +// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(emptyBuf[:], v...) 
+ out.n = n + return out, nil +} + +var coderBytesValidateUTF8 = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytesValidateUTF8, + unmarshal: consumeBytesValidateUTF8, + merge: mergeBytes, +} + +// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. +// The zero value is not encoded. +func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bytes() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytesNoZero wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. +// The zero value is not decoded. +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZero = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZero, + unmarshal: consumeBytesNoZero, + merge: mergeBytesNoZero, +} + +// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZeroValidateUTF8, + unmarshal: consumeBytesNoZeroValidateUTF8, + merge: mergeBytesNoZero, +} + +// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. +func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BytesSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + } + return b, nil +} + +// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. 
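+// Each element is copied into a freshly allocated buffer so the decoded message does not alias the input buffer.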
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSlice = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSlice, + unmarshal: consumeBytesSlice, + merge: mergeBytesSlice, +} + +// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSliceValidateUTF8, + unmarshal: consumeBytesSliceValidateUTF8, + merge: mergeBytesSlice, +} + +// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. +func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.Bytes())) +} + +// appendBytesValue encodes a []byte value as a Bytes. +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + return b, nil +} + +// consumeBytesValue decodes a []byte value as a Bytes. +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil +} + +var coderBytesValue = valueCoderFuncs{ + size: sizeBytesValue, + marshal: appendBytesValue, + unmarshal: consumeBytesValue, + merge: mergeBytesValue, +} + +// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.Bytes())) + } + return size +} + +// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. 
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + } + return b, nil +} + +// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + out.n = n + return listv, out, nil +} + +var coderBytesSliceValue = valueCoderFuncs{ + size: sizeBytesSliceValue, + marshal: appendBytesSliceValue, + unmarshal: consumeBytesSliceValue, + merge: mergeBytesListValue, +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go new file mode 100644 index 00000000..44885a76 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -0,0 +1,389 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "errors" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapInfo struct { + goType reflect.Type + keyWiretag uint64 + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs + keyZero pref.Value + keyKind pref.Kind + conv *mapConverter +} + +func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. 
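+ // A map<K,V> field is wire-encoded as a repeated message whose entries hold the key in field 1 and the value in field 2.
+ // For example, a map<string,int32> entry {"a": 1} marshals as 0x0a 0x01 0x61 (key) followed by 0x10 0x01 (value), five bytes wrapped in the entry's length prefix.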
+ keyField := fd.MapKey() + valField := fd.MapValue() + keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()]) + valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()]) + keyFuncs := encoderFuncsForValue(keyField) + valFuncs := encoderFuncsForValue(valField) + conv := newMapConverter(ft, fd) + + mapi := &mapInfo{ + goType: ft, + keyWiretag: keyWiretag, + valWiretag: valWiretag, + keyFuncs: keyFuncs, + valFuncs: valFuncs, + keyZero: keyField.Default(), + keyKind: keyField.Kind(), + conv: conv, + } + if valField.Kind() == pref.MessageKind { + valueMessage = getMessageInfo(ft.Elem()) + } + + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft) + if mp.Elem().IsNil() { + mp.Elem().Set(reflect.MakeMap(mapi.goType)) + } + if f.mi == nil { + return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts) + } else { + return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts) + } + }, + } + switch valField.Kind() { + case pref.MessageKind: + funcs.merge = mergeMapOfMessage + case pref.BytesKind: + funcs.merge = mergeMapOfBytes + default: + funcs.merge = mergeMap + } + if valFuncs.isInit != nil { + funcs.isInit = func(p pointer, f *coderFieldInfo) error { + return isInitMap(p.AsValueOf(ft).Elem(), mapi, f) + } + } + return valueMessage, funcs +} + +const ( + mapKeyTagSize = 1 // field 1, tag size 1. + mapValTagSize = 1 // field 2, tag size 2. 
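+ // Field numbers 1 and 2 encode in a single varint byte for every wire type, so both tag sizes are one byte.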
+) + +func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { + if mapv.Len() == 0 { + return 0 + } + n := 0 + iter := mapRange(mapv) + for iter.Next() { + key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() + keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + var valSize int + value := mapi.conv.valConv.PBValueOf(iter.Value()) + if f.mi == nil { + valSize = mapi.valFuncs.size(value, mapValTagSize, opts) + } else { + p := pointerOfValue(iter.Value()) + valSize += mapValTagSize + valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) + } + n += f.tagsize + protowire.SizeBytes(keySize+valSize) + } + return n +} + +func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var ( + key = mapi.keyZero + val = mapi.conv.valConv.New() + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return out, errors.New("invalid field number") + } + b = b[n:] + err := errUnknown + switch num { + case genid.MapEntry_Key_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case genid.MapEntry_Value_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { + break + } + val = v + n = o.n + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) + out.n = n + return out, nil +} + +func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var ( + key = mapi.keyZero + val = reflect.New(f.mi.GoReflectType.Elem()) + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return out, errors.New("invalid field number") + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + if wtyp != protowire.BytesType { + break + } + var v []byte + v, n = protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var o unmarshalOutput + o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) + if o.initialized { + // Consider this map item initialized so long as we see + // an initialized value. 
+ out.initialized = true + } + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) + out.n = n + return out, nil +} + +func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if f.mi == nil { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := mapi.conv.valConv.PBValueOf(valrv) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapi.valFuncs.size(val, mapValTagSize, opts) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + } else { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := pointerOfValue(valrv) + valSize := f.mi.sizePointer(val, opts) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapValTagSize + protowire.SizeBytes(valSize) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b = protowire.AppendVarint(b, mapi.valWiretag) + b = protowire.AppendVarint(b, uint64(valSize)) + return f.mi.marshalAppendPointer(b, val, opts) + } +} + +func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if mapv.Len() == 0 { + return b, nil + } + if opts.Deterministic() { + return appendMapDeterministic(b, mapv, mapi, f, opts) + } + iter := mapRange(mapv) + for iter.Next() { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + keys := mapv.MapKeys() + sort.Slice(keys, func(i, j int) bool { + switch keys[i].Kind() { + case reflect.Bool: + return !keys[i].Bool() && keys[j].Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return keys[i].Int() < keys[j].Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return keys[i].Uint() < keys[j].Uint() + case reflect.Float32, reflect.Float64: + return keys[i].Float() < keys[j].Float() + case reflect.String: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keys[i].Kind().String()) + } + }) + for _, key := range keys { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { + if mi := f.mi; mi != nil { + mi.init() + if !mi.needsInitCheck { + return nil + } + iter := mapRange(mapv) + for iter.Next() { + val := pointerOfValue(iter.Value()) + if err := mi.checkInitializedPointer(val); err != nil { + return err + } + } + } else { + iter := mapRange(mapv) + for iter.Next() { + val := mapi.conv.valConv.PBValueOf(iter.Value()) + if err := mapi.valFuncs.isInit(val); err != nil { + return err + } + } + } + return nil +} + +func mergeMap(dst, src pointer, f *coderFieldInfo, 
opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), iter.Value()) + } +} + +func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) + } +} + +func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + val := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) + } else { + opts.Merge(asMessage(val), asMessage(iter.Value())) + } + dstm.SetMapIndex(iter.Key(), val) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go new file mode 100644 index 00000000..2706bb67 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package impl + +import "reflect" + +type mapIter struct { + v reflect.Value + keys []reflect.Value +} + +// mapRange provides a less-efficient equivalent to +// the Go 1.12 reflect.Value.MapRange method. +func mapRange(v reflect.Value) *mapIter { + return &mapIter{v: v} +} + +func (i *mapIter) Next() bool { + if i.keys == nil { + i.keys = i.v.MapKeys() + } else { + i.keys = i.keys[1:] + } + return len(i.keys) > 0 +} + +func (i *mapIter) Key() reflect.Value { + return i.keys[0] +} + +func (i *mapIter) Value() reflect.Value { + return i.v.MapIndex(i.keys[0]) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go new file mode 100644 index 00000000..1533ef60 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package impl + +import "reflect" + +func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go new file mode 100644 index 00000000..0e176d56 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -0,0 +1,159 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/fieldsort" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// coderMessageInfo contains per-message information used by the fast-path functions. +// This is a different type from MessageInfo to keep MessageInfo as general-purpose as +// possible. +type coderMessageInfo struct { + methods piface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo + coderFields map[protowire.Number]*coderFieldInfo + sizecacheOffset offset + unknownOffset offset + extensionOffset offset + needsInitCheck bool + isMessageSet bool + numRequiredFields uint8 +} + +type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type + validation validationInfo // information used by message validation + num pref.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required +} + +func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.extensionOffset = si.extensionOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + preallocFields := make([]coderFieldInfo, fields.Len()) + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case isOneof: + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &preallocFields[i] + *cf = coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), + isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == pref.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num 
< mi.orderedCoderFields[j].num + }) + + var maxDense pref.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) >= len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return fieldsort.Less(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go new file mode 100644 index 00000000..cfb68e12 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -0,0 +1,120 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" +) + +func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { + if !flags.ProtoLegacy { + return 0 + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + for _, x := range ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + num, _ := protowire.DecodeTag(xi.wiretag) + size += messageset.SizeField(num) + size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) + } + + unknown := *p.Apply(mi.unknownOffset).Bytes() + size += messageset.SizeUnknown(unknown) + + return size +} + +func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + switch len(ext) { + case 0: + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + for _, x := range ext { + var err error + b, err = marshalMessageSetField(mi, b, x, opts) + if err != nil { + return b, err + } + } + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. 
+ keys := make([]int, 0, len(ext)) + for k := range ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + var err error + b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) + if err != nil { + return b, err + } + } + } + + unknown := *p.Apply(mi.unknownOffset).Bytes() + b, err := messageset.AppendUnknown(b, unknown) + if err != nil { + return b, err + } + + return b, nil +} + +func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { + xi := getExtensionFieldInfo(x.Type()) + num, _ := protowire.DecodeTag(xi.wiretag) + b = messageset.AppendFieldStart(b, num) + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { + if !flags.ProtoLegacy { + return out, errors.New("no support for message_set_wire_format") + } + + ep := p.Apply(mi.extensionOffset).Extensions() + if *ep == nil { + *ep = make(map[int32]ExtensionField) + } + ext := *ep + unknown := p.Apply(mi.unknownOffset).Bytes() + initialized := true + err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { + o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) + if err == errUnknown { + *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) + *unknown = append(*unknown, v...) + return nil + } + if !o.initialized { + initialized = false + } + return err + }) + out.n = len(b) + out.initialized = initialized + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go new file mode 100644 index 00000000..86f7dc3c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
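+// Reflect-based enum coders for purego and App Engine builds, where the
+// unsafe-pointer implementations (see codec_unsafe.go) cannot be used.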
+ +// +build purego appengine + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := p.v.Elem().Int() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := p.v.Elem().Int() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + p.v.Elem().SetInt(int64(v)) + out.n = n + return out, nil +} + +func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(src.v.Elem()) +} + +var coderEnum = pointerCoderFuncs{ + size: sizeEnum, + marshal: appendEnum, + unmarshal: consumeEnum, + merge: mergeEnum, +} + +func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + if p.v.Elem().Int() == 0 { + return 0 + } + return sizeEnum(p, f, opts) +} + +func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if p.v.Elem().Int() == 0 { + return b, nil + } + return appendEnum(b, p, f, opts) +} + +func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if src.v.Elem().Int() != 0 { + dst.v.Elem().Set(src.v.Elem()) + } +} + +var coderEnumNoZero = pointerCoderFuncs{ + size: sizeEnumNoZero, + marshal: appendEnumNoZero, + unmarshal: consumeEnum, + merge: mergeEnumNoZero, +} + +func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return sizeEnum(pointer{p.v.Elem()}, f, opts) +} + +func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendEnum(b, pointer{p.v.Elem()}, f, opts) +} + +func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + if p.v.Elem().IsNil() { + p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) + } + return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) +} + +func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if !src.v.Elem().IsNil() { + v := reflect.New(dst.v.Type().Elem().Elem()) + v.Elem().Set(src.v.Elem().Elem()) + dst.v.Elem().Set(v) + } +} + +var coderEnumPtr = pointerCoderFuncs{ + size: sizeEnumPtr, + marshal: appendEnumPtr, + unmarshal: consumeEnumPtr, + merge: mergeEnumPtr, +} + +func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize + } + return size +} + +func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + s := p.v.Elem() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return 
out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + b = b[n:] + } + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + out.n = n + return out, nil +} + +func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) +} + +var coderEnumSlice = pointerCoderFuncs{ + size: sizeEnumSlice, + marshal: appendEnumSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} + +func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return 0 + } + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + return f.tagsize + protowire.SizeBytes(n) +} + +func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +var coderEnumPackedSlice = pointerCoderFuncs{ + size: sizeEnumPackedSlice, + marshal: appendEnumPackedSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go new file mode 100644 index 00000000..e8997123 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -0,0 +1,557 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// pointerCoderFuncs is a set of pointer encoding functions. +type pointerCoderFuncs struct { + mi *MessageInfo + size func(p pointer, f *coderFieldInfo, opts marshalOptions) int + marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) + isInit func(p pointer, f *coderFieldInfo) error + merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) +} + +// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+type valueCoderFuncs struct { + size func(v pref.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) + isInit func(v pref.Value) error + merge func(dst, src pref.Value, opts mergeOptions) pref.Value +} + +// fieldCoder returns pointer functions for a field, used for operating on +// struct fields. +func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesSliceValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) + case pref.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types + // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
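+ // Length-delimited kinds (strings, bytes, messages, and groups) are never packed.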
+ if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } + case fd.Kind() == pref.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) + case fd.Kind() == pref.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) + case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesNoZeroValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + } + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Ptr + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePtr + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + case 
pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + } + } + panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) +} + +// encoderFuncsForValue returns value functions for a field, used for +// extension values and map encoding. +func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { + switch { + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolSliceValue + case pref.EnumKind: + return coderEnumSliceValue + case pref.Int32Kind: + return coderInt32SliceValue + case pref.Sint32Kind: + return coderSint32SliceValue + case pref.Uint32Kind: + return coderUint32SliceValue + case pref.Int64Kind: + return coderInt64SliceValue + case pref.Sint64Kind: + return coderSint64SliceValue + case pref.Uint64Kind: + return coderUint64SliceValue + case pref.Sfixed32Kind: + return coderSfixed32SliceValue + case pref.Fixed32Kind: + return coderFixed32SliceValue + case pref.FloatKind: + return coderFloatSliceValue + case pref.Sfixed64Kind: + return coderSfixed64SliceValue + case pref.Fixed64Kind: + return coderFixed64SliceValue + case pref.DoubleKind: + return coderDoubleSliceValue + case pref.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. 
+ return coderStringSliceValue + case pref.BytesKind: + return coderBytesSliceValue + case pref.MessageKind: + return coderMessageSliceValue + case pref.GroupKind: + return coderGroupSliceValue + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolPackedSliceValue + case pref.EnumKind: + return coderEnumPackedSliceValue + case pref.Int32Kind: + return coderInt32PackedSliceValue + case pref.Sint32Kind: + return coderSint32PackedSliceValue + case pref.Uint32Kind: + return coderUint32PackedSliceValue + case pref.Int64Kind: + return coderInt64PackedSliceValue + case pref.Sint64Kind: + return coderSint64PackedSliceValue + case pref.Uint64Kind: + return coderUint64PackedSliceValue + case pref.Sfixed32Kind: + return coderSfixed32PackedSliceValue + case pref.Fixed32Kind: + return coderFixed32PackedSliceValue + case pref.FloatKind: + return coderFloatPackedSliceValue + case pref.Sfixed64Kind: + return coderSfixed64PackedSliceValue + case pref.Fixed64Kind: + return coderFixed64PackedSliceValue + case pref.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: + case pref.BoolKind: + return coderBoolValue + case pref.EnumKind: + return coderEnumValue + case pref.Int32Kind: + return coderInt32Value + case pref.Sint32Kind: + return coderSint32Value + case pref.Uint32Kind: + return coderUint32Value + case pref.Int64Kind: + return coderInt64Value + case pref.Sint64Kind: + return coderSint64Value + case pref.Uint64Kind: + return coderUint64Value + case pref.Sfixed32Kind: + return coderSfixed32Value + case pref.Fixed32Kind: + return coderFixed32Value + case pref.FloatKind: + return coderFloatValue + case pref.Sfixed64Kind: + return coderSfixed64Value + case pref.Fixed64Kind: + return coderFixed64Value + case pref.DoubleKind: + return coderDoubleValue + case pref.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue + case pref.BytesKind: + return coderBytesValue + case pref.MessageKind: + return coderMessageValue + case pref.GroupKind: + return coderGroupValue + } + } + panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go new file mode 100644 index 00000000..e118af1e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +// When using unsafe pointers, we can just treat enum values as int32s. + +var ( + coderEnumNoZero = coderInt32NoZero + coderEnum = coderInt32 + coderEnumPtr = coderInt32Ptr + coderEnumSlice = coderInt32Slice + coderEnumPackedSlice = coderInt32PackedSlice +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go new file mode 100644 index 00000000..36a90dff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -0,0 +1,467 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// unwrapper unwraps the value to the underlying value. +// This is implemented by List and Map. +type unwrapper interface { + protoUnwrap() interface{} +} + +// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. +type Converter interface { + // PBValueOf converts a reflect.Value to a protoreflect.Value. + PBValueOf(reflect.Value) pref.Value + + // GoValueOf converts a protoreflect.Value to a reflect.Value. + GoValueOf(pref.Value) reflect.Value + + // IsValidPB returns whether a protoreflect.Value is compatible with this type. + IsValidPB(pref.Value) bool + + // IsValidGo returns whether a reflect.Value is compatible with this type. + IsValidGo(reflect.Value) bool + + // New returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns a new mutable value. + New() pref.Value + + // Zero returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns an immutable, empty value. + Zero() pref.Value +} + +// NewConverter matches a Go type with a protobuf field and returns a Converter +// that converts between the two. Enums must be a named int32 kind that +// implements protoreflect.Enum, and messages must be pointer to a named +// struct type that implements protoreflect.ProtoMessage. +// +// This matcher deliberately supports a wider range of Go types than what +// protoc-gen-go historically generated to be able to automatically wrap some +// v1 messages generated by other forks of protoc-gen-go. +func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case fd.IsList(): + return newListConverter(t, fd) + case fd.IsMap(): + return newMapConverter(t, fd) + default: + return newSingularConverter(t, fd) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +var ( + boolType = reflect.TypeOf(bool(false)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf(string("")) + bytesType = reflect.TypeOf([]byte(nil)) + byteType = reflect.TypeOf(byte(0)) +) + +var ( + boolZero = pref.ValueOfBool(false) + int32Zero = pref.ValueOfInt32(0) + int64Zero = pref.ValueOfInt64(0) + uint32Zero = pref.ValueOfUint32(0) + uint64Zero = pref.ValueOfUint64(0) + float32Zero = pref.ValueOfFloat32(0) + float64Zero = pref.ValueOfFloat64(0) + stringZero = pref.ValueOfString("") + bytesZero = pref.ValueOfBytes(nil) +) + +func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { + if fd.Cardinality() == pref.Repeated { + // Default isn't defined for repeated fields. 
+ return zero + } + return fd.Default() + } + switch fd.Kind() { + case pref.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } + case pref.Uint32Kind, pref.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } + case pref.Uint64Kind, pref.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } + case pref.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } + case pref.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } + case pref.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } + case pref.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } + case pref.EnumKind: + // Handle enums, which must be a named int32 type. + if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } + case pref.MessageKind, pref.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type boolConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfBool(v.Bool()) +} +func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) +} +func (c *boolConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(bool) + return ok +} +func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *boolConverter) New() pref.Value { return c.def } +func (c *boolConverter) Zero() pref.Value { return c.def } + +type int32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt32(int32(v.Int())) +} +func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) +} +func (c *int32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(int32) + return ok +} +func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int32Converter) New() pref.Value { return c.def } +func (c *int32Converter) Zero() pref.Value { return c.def } + +type int64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt64(int64(v.Int())) +} +func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) +} +func (c *int64Converter) IsValidPB(v 
pref.Value) bool { + _, ok := v.Interface().(int64) + return ok +} +func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int64Converter) New() pref.Value { return c.def } +func (c *int64Converter) Zero() pref.Value { return c.def } + +type uint32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint32(uint32(v.Uint())) +} +func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) +} +func (c *uint32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint32) + return ok +} +func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint32Converter) New() pref.Value { return c.def } +func (c *uint32Converter) Zero() pref.Value { return c.def } + +type uint64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint64(uint64(v.Uint())) +} +func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) +} +func (c *uint64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint64) + return ok +} +func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint64Converter) New() pref.Value { return c.def } +func (c *uint64Converter) Zero() pref.Value { return c.def } + +type float32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat32(float32(v.Float())) +} +func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float32(v.Float())).Convert(c.goType) +} +func (c *float32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float32) + return ok +} +func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float32Converter) New() pref.Value { return c.def } +func (c *float32Converter) Zero() pref.Value { return c.def } + +type float64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat64(float64(v.Float())) +} +func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) +} +func (c *float64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float64) + return ok +} +func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float64Converter) New() pref.Value { return c.def } +func (c *float64Converter) Zero() pref.Value { return c.def } + +type stringConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { + if 
v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfString(v.Convert(stringType).String()) +} +func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. + s := v.Interface().(string) + if c.goType.Kind() == reflect.Slice && s == "" { + return reflect.Zero(c.goType) // ensure empty string is []byte(nil) + } + return reflect.ValueOf(s).Convert(c.goType) +} +func (c *stringConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(string) + return ok +} +func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *stringConverter) New() pref.Value { return c.def } +func (c *stringConverter) Zero() pref.Value { return c.def } + +type bytesConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { + return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } + return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) +} +func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) +} +func (c *bytesConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().([]byte) + return ok +} +func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *bytesConverter) New() pref.Value { return c.def } +func (c *bytesConverter) Zero() pref.Value { return c.def } + +type enumConverter struct { + goType reflect.Type + def pref.Value +} + +func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { + var def pref.Value + if fd.Cardinality() == pref.Repeated { + def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} +} + +func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfEnum(pref.EnumNumber(v.Int())) +} + +func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) +} + +func (c *enumConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(pref.EnumNumber) + return ok +} + +func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *enumConverter) New() pref.Value { + return c.def +} + +func (c *enumConverter) Zero() pref.Value { + return c.def +} + +type messageConverter struct { + goType reflect.Type +} + +func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} +} + +func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return pref.ValueOfMessage(m.ProtoReflect()) + } + return pref.ValueOfMessage(legacyWrapMessage(v)) +} + +func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = 
reflect.ValueOf(m.Interface()) + } + if rv.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) + } + return rv +} + +func (c *messageConverter) IsValidPB(v pref.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + return rv.Type() == c.goType +} + +func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *messageConverter) New() pref.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *messageConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go new file mode 100644 index 00000000..6fccab52 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} + case t.Kind() == reflect.Slice: + return &listConverter{t, newSingularConverter(t.Elem(), fd)} + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type listConverter struct { + goType reflect.Type // []T + c Converter +} + +func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) + return pref.ValueOfList(&listReflect{pv, c.c}) +} + +func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) + } + return rv.Elem() +} + +func (c *listConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type().Elem() == c.goType +} + +func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listConverter) New() pref.Value { + return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +} + +func (c *listConverter) Zero() pref.Value { + return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +} + +type listPtrConverter struct { + goType reflect.Type // *[]T + c Converter +} + +func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfList(&listReflect{v, c.c}) +} + +func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { + return v.List().(*listReflect).v +} + +func (c *listPtrConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type() == c.goType +} + +func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listPtrConverter) New() pref.Value { + return 
c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *listPtrConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type listReflect struct { + v reflect.Value // *[]T + conv Converter +} + +func (ls *listReflect) Len() int { + if ls.v.IsNil() { + return 0 + } + return ls.v.Elem().Len() +} +func (ls *listReflect) Get(i int) pref.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) +} +func (ls *listReflect) Set(i int, v pref.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) +} +func (ls *listReflect) Append(v pref.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) +} +func (ls *listReflect) AppendMutable() pref.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } + v := ls.NewElement() + ls.Append(v) + return v +} +func (ls *listReflect) Truncate(i int) { + ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) +} +func (ls *listReflect) NewElement() pref.Value { + return ls.conv.New() +} +func (ls *listReflect) IsValid() bool { + return !ls.v.IsNil() +} +func (ls *listReflect) protoUnwrap() interface{} { + return ls.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go new file mode 100644 index 00000000..de06b259 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapConverter struct { + goType reflect.Type // map[K]V + keyConv, valConv Converter +} + +func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + return &mapConverter{ + goType: t, + keyConv: newSingularConverter(t.Key(), fd.MapKey()), + valConv: newSingularConverter(t.Elem(), fd.MapValue()), + } +} + +func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) +} + +func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { + return v.Map().(*mapReflect).v +} + +func (c *mapConverter) IsValidPB(v pref.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false + } + return mapv.v.Type() == c.goType +} + +func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *mapConverter) New() pref.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) +} + +func (c *mapConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type mapReflect struct { + v reflect.Value // map[K]V + keyConv Converter + valConv Converter +} + +func (ms *mapReflect) Len() int { + return ms.v.Len() +} +func (ms *mapReflect) Has(k pref.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() +} +func (ms *mapReflect) Get(k pref.MapKey) pref.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { + return pref.Value{} + } + return ms.valConv.PBValueOf(rv) +} +func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { + rk := 
ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) +} +func (ms *mapReflect) Clear(k pref.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) +} +func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { + if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } + v := ms.Get(k) + if !v.IsValid() { + v = ms.NewValue() + ms.Set(k, v) + } + return v +} +func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() + v := ms.valConv.PBValueOf(iter.Value()) + if !f(k, v) { + return + } + } +} +func (ms *mapReflect) NewValue() pref.Value { + return ms.valConv.New() +} +func (ms *mapReflect) IsValid() bool { + return !ms.v.IsNil() +} +func (ms *mapReflect) protoUnwrap() interface{} { + return ms.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go new file mode 100644 index 00000000..85ba1d3b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -0,0 +1,274 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math/bits" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type unmarshalOptions struct { + flags protoiface.UnmarshalInputFlags + resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +func (o unmarshalOptions) Options() proto.UnmarshalOptions { + return proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + DiscardUnknown: o.DiscardUnknown(), + Resolver: o.resolver, + } +} + +func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } + +func (o unmarshalOptions) IsDefault() bool { + return o.flags == 0 && o.resolver == preg.GlobalTypes +} + +var lazyUnmarshalOptions = unmarshalOptions{ + resolver: preg.GlobalTypes, +} + +type unmarshalOutput struct { + n int // number of bytes consumed + initialized bool +} + +// unmarshal is protoreflect.Methods.Unmarshal. 
+func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + var flags piface.UnmarshalOutputFlags + if out.initialized { + flags |= piface.UnmarshalInitialized + } + return piface.UnmarshalOutput{ + Flags: flags, + }, err +} + +// errUnknown is returned during unmarshaling to indicate a parse error that +// should result in a field being placed in the unknown fields section (for example, +// when the wire type doesn't match) as opposed to the entire unmarshal operation +// failing (for example, when a field extends past the available input). +// +// This is a sentinel error which should never be visible to the user. +var errUnknown = errors.New("unknown") + +func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + mi.init() + if flags.ProtoLegacy && mi.isMessageSet { + return unmarshalMessageSet(mi, b, p, opts) + } + initialized := true + var requiredMask uint64 + var exts *map[int32]ExtensionField + start := len(b) + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := p.Apply(mi.unknownOffset).Bytes() + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) 
+ } + } + b = b[n:] + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} + +func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { + x := exts[int32(num)] + xt := x.Type() + if xt == nil { + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { + if err == preg.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) + } + } + xi := getExtensionFieldInfo(xt) + if xi.funcs.unmarshal == nil { + return out, errUnknown + } + if flags.LazyUnmarshalExtensions { + if opts.IsDefault() && x.canLazy(xt) { + out, valid := skipExtension(b, xi, num, wtyp, opts) + switch valid { + case ValidationValid: + if out.initialized { + x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) + exts[int32(num)] = x + return out, nil + } + case ValidationInvalid: + return out, errors.New("invalid wire format") + case ValidationUnknown: + } + } + } + ival := x.Value() + if !ival.IsValid() && xi.unmarshalNeedsValue { + // Create a new message, list, or map value to fill in. + // For enums, create a prototype value to let the unmarshal func know the + // concrete type. + ival = xt.New() + } + v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) + if err != nil { + return out, err + } + if xi.funcs.isInit == nil { + out.initialized = true + } + x.Set(xt, v) + exts[int32(num)] = x + return out, nil +} + +func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + if xi.validation.mi == nil { + return out, ValidationUnknown + } + xi.validation.mi.init() + switch xi.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(b, num, opts) + return out, st + default: + return out, ValidationUnknown + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go new file mode 100644 index 00000000..8c8a794c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -0,0 +1,199 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "math" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/internal/flags" + proto "google.golang.org/protobuf/proto" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type marshalOptions struct { + flags piface.MarshalInputFlags +} + +func (o marshalOptions) Options() proto.MarshalOptions { + return proto.MarshalOptions{ + AllowPartial: true, + Deterministic: o.Deterministic(), + UseCachedSize: o.UseCachedSize(), + } +} + +func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } +func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } + +// size is protoreflect.Methods.Size. +func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + size := mi.sizePointer(p, marshalOptions{ + flags: in.Flags, + }) + return piface.SizeOutput{Size: size} +} + +func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { + mi.init() + if p.IsNil() { + return 0 + } + if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { + return int(size) + } + } + return mi.sizePointerSlow(p, opts) +} + +func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { + if flags.ProtoLegacy && mi.isMessageSet { + size = sizeMessageSet(mi, p, opts) + if mi.sizecacheOffset.IsValid() { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + return size + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + size += mi.sizeExtensions(e, opts) + } + for _, f := range mi.orderedCoderFields { + if f.funcs.size == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + size += f.funcs.size(fptr, f, opts) + } + if mi.unknownOffset.IsValid() { + u := *p.Apply(mi.unknownOffset).Bytes() + size += len(u) + } + if mi.sizecacheOffset.IsValid() { + if size > math.MaxInt32 { + // The size is too large for the int32 sizecache field. + // We will need to recompute the size when encoding; + // unfortunately expensive, but better than invalid output. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + } else { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + } + return size +} + +// marshal is protoreflect.Methods.Marshal. +func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ + flags: in.Flags, + }) + return piface.MarshalOutput{Buf: b}, err +} + +func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { + mi.init() + if p.IsNil() { + return b, nil + } + if flags.ProtoLegacy && mi.isMessageSet { + return marshalMessageSet(mi, b, p, opts) + } + var err error + // The old marshaler encodes extensions at beginning. + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + // TODO: Special handling for MessageSet? 
+ b, err = mi.appendExtensions(b, e, opts) + if err != nil { + return b, err + } + } + for _, f := range mi.orderedCoderFields { + if f.funcs.marshal == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + } + if mi.unknownOffset.IsValid() && !mi.isMessageSet { + u := *p.Apply(mi.unknownOffset).Bytes() + b = append(b, u...) + } + return b, nil +} + +func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { + if ext == nil { + return 0 + } + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + n += xi.funcs.size(x.Value(), xi.tagsize, opts) + } + return n +} + +func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { + if ext == nil { + return b, nil + } + + switch len(*ext) { + case 0: + return b, nil + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + var err error + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + } + return b, err + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(*ext)) + for k := range *ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + var err error + for _, k := range keys { + x := (*ext)[int32(k)] + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + if err != nil { + return b, err + } + } + return b, nil + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go new file mode 100644 index 00000000..8c1eab4b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type EnumInfo struct { + GoReflectType reflect.Type // int32 kind + Desc pref.EnumDescriptor +} + +func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +} +func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go new file mode 100644 index 00000000..e904fd99 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -0,0 +1,156 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sync" + "sync/atomic" + + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ExtensionInfo implements ExtensionType. +// +// This type contains a number of exported fields for legacy compatibility. +// The only non-deprecated use of this type is through the methods of the +// ExtensionType interface. +type ExtensionInfo struct { + // An ExtensionInfo may exist in several stages of initialization. 
+ // + // extensionInfoUninitialized: Some or all of the legacy exported + // fields may be set, but none of the unexported fields have been + // initialized. This is the starting state for an ExtensionInfo + // in legacy generated code. + // + // extensionInfoDescInit: The desc field is set, but other unexported fields + // may not be initialized. Legacy exported fields may or may not be set. + // This is the starting state for an ExtensionInfo in newly generated code. + // + // extensionInfoFullInit: The ExtensionInfo is fully initialized. + // This state is only entered after lazy initialization is complete. + init uint32 + mu sync.Mutex + + goType reflect.Type + desc extensionTypeDescriptor + conv Converter + info *extensionFieldInfo // for fast-path method implementations + + // ExtendedType is a typed nil-pointer to the parent message type that + // is being extended. It is possible for this to be unpopulated in v2 + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. + ExtendedType piface.MessageV1 + + // ExtensionType is the zero value of the extension type. + // + // For historical reasons, reflect.TypeOf(ExtensionType) and the + // type returned by InterfaceOf may not be identical. + // + // Deprecated: Use InterfaceOf(xt.Zero()) instead. + ExtensionType interface{} + + // Field is the field number of the extension. + // + // Deprecated: Use the Descriptor().Number method instead. + Field int32 + + // Name is the fully qualified name of extension. + // + // Deprecated: Use the Descriptor().FullName method instead. + Name string + + // Tag is the protobuf struct tag used in the v1 API. + // + // Deprecated: Do not use. + Tag string + + // Filename is the proto filename in which the extension is defined. + // + // Deprecated: Use Descriptor().ParentFile().Path() instead. + Filename string +} + +// Stages of initialization: See the ExtensionInfo.init field. 
+const ( + extensionInfoUninitialized = 0 + extensionInfoDescInit = 1 + extensionInfoFullInit = 2 +) + +func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit +} + +func (xi *ExtensionInfo) New() pref.Value { + return xi.lazyInit().New() +} +func (xi *ExtensionInfo) Zero() pref.Value { + return xi.lazyInit().Zero() +} +func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { + return xi.lazyInit().GoValueOf(v).Interface() +} +func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { + return xi.lazyInit().IsValidPB(v) +} +func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } + return &xi.desc +} + +func (xi *ExtensionInfo) lazyInit() Converter { + if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { + xi.lazyInitSlow() + } + return xi.conv +} + +func (xi *ExtensionInfo) lazyInitSlow() { + xi.mu.Lock() + defer xi.mu.Unlock() + + if xi.init == extensionInfoFullInit { + return + } + defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) + + if xi.desc.ExtensionDescriptor == nil { + xi.initFromLegacy() + } + if !xi.desc.ExtensionDescriptor.IsPlaceholder() { + if xi.ExtensionType == nil { + xi.initToLegacy() + } + xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) + xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) + xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) + } +} + +type extensionTypeDescriptor struct { + pref.ExtensionDescriptor + xi *ExtensionInfo +} + +func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { + return xtd.xi +} +func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { + return xtd.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go new file mode 100644 index 00000000..f7d7ffb5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -0,0 +1,219 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// legacyEnumName returns the name of enums used in legacy code. +// It is neither the protobuf full name nor the qualified Go name, +// but rather an odd hybrid of both. +func legacyEnumName(ed pref.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { + protoPkg = string(fd.Package()) + enumName = strings.TrimPrefix(enumName, protoPkg+".") + } + if protoPkg == "" { + return strs.GoCamelCase(enumName) + } + return protoPkg + "." + strs.GoCamelCase(enumName) +} + +// legacyWrapEnum wraps v as a protoreflect.Enum, +// where v must be a int32 kind and not implement the v2 API already. 
+func legacyWrapEnum(v reflect.Value) pref.Enum { + et := legacyLoadEnumType(v.Type()) + return et.New(pref.EnumNumber(v.Int())) +} + +var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + +// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, +// where t must be an int32 kind and not implement the v2 API already. +func legacyLoadEnumType(t reflect.Type) pref.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { + return et.(pref.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. + var et pref.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { + return et.(pref.EnumType) + } + return et +} + +type legacyEnumType struct { + desc pref.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum +} + +func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { + if e, ok := t.m.Load(n); ok { + return e.(pref.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e +} +func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { + return t.desc +} + +type legacyEnumWrapper struct { + num pref.EnumNumber + pbTyp pref.EnumType + goTyp reflect.Type +} + +func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { + return e.pbTyp.Descriptor() +} +func (e *legacyEnumWrapper) Type() pref.EnumType { + return e.pbTyp +} +func (e *legacyEnumWrapper) Number() pref.EnumNumber { + return e.num +} +func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { + return e +} +func (e *legacyEnumWrapper) protoUnwrap() interface{} { + v := reflect.New(e.goTyp).Elem() + v.SetInt(int64(e.num)) + return v.Interface() +} + +var ( + _ pref.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) +) + +var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must be an int32 kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := legacyEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() + if _, ok := ev.(pref.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) + if !ok { + return aberrantLoadEnumDesc(t) + } + b, idxs := edV1.EnumDescriptor() + + var ed pref.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1 : len(idxs)-1] { + md = md.Messages().Get(i) + } + ed = md.Enums().Get(idxs[len(idxs)-1]) + } + if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must not implement protoreflect.Enum or enumV1. +// +// If the type does not implement enumV1, then there is no reliable +// way to derive the original protobuf type information. 
+// We are unable to use the global enum registry since it is +// unfortunately keyed by the protobuf full name, which we also do not know. +// Thus, this produces some bogus enum descriptor based on the Go type name. +func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. + ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} + ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum + ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) + + // TODO: Use the presence of a UnmarshalJSON method to determine proto2? + + vd := &ed.L2.Values.List[0] + vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN + vd.L0.ParentFile = ed.L0.ParentFile + vd.L0.Parent = ed + + // TODO: We could use the String method to obtain some enum value names by + // starting at 0 and print the enum until it produces invalid identifiers. + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(pref.EnumDescriptor) + } + return ed +} + +// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type +// The provided name is not guaranteed to be stable nor universally unique. +// It should be sufficiently unique within a program. +// +// This is exported for testing purposes. +func AberrantDeriveFullName(t reflect.Type) pref.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': + return '.' + case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': + return r + default: + return '_' + } + } + prefix := strings.Map(sanitize, t.PkgPath()) + suffix := strings.Map(sanitize, t.Name()) + if suffix == "" { + suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) + } + + ss := append(strings.Split(prefix, "."), suffix) + for i, s := range ss { + if s == "" || ('0' <= s[0] && s[0] <= '9') { + ss[i] = "x" + s + } + } + return pref.FullName(strings.Join(ss, ".")) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go new file mode 100644 index 00000000..c3d741c2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "encoding/binary" + "encoding/json" + "hash/crc32" + "math" + "reflect" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// These functions exist to support exported APIs in generated protobufs. +// While these are deprecated, they cannot be removed for compatibility reasons. + +// LegacyEnumName returns the name of enums used in legacy code. +func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { + return legacyEnumName(ed) +} + +// LegacyMessageTypeOf returns the protoreflect.MessageType for m, +// with name used as the message name if necessary. 
+func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageInfo(reflect.TypeOf(m), name) +} + +// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. +// The input can either be a string representing the enum value by name, +// or a number representing the enum number itself. +func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { + if b[0] == '"' { + var name pref.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + ev := ed.Values().ByName(name) + if ev == nil { + return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) + } + return ev.Number(), nil + } else { + var num pref.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + return num, nil + } +} + +// CompressGZIP compresses the input as a GZIP-encoded file. +// The current implementation does no compression. +func (Export) CompressGZIP(in []byte) (out []byte) { + // RFC 1952, section 2.3.1. + var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} + + // RFC 1951, section 3.2.4. + var blockHeader [5]byte + const maxBlockSize = math.MaxUint16 + numBlocks := 1 + len(in)/maxBlockSize + + // RFC 1952, section 2.3.1. + var gzipFooter [8]byte + binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) + binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) + + // Encode the input without compression using raw DEFLATE blocks. + out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) + out = append(out, gzipHeader[:]...) + for blockHeader[0] == 0 { + blockSize := maxBlockSize + if blockSize > len(in) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) + binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] + } + out = append(out, gzipFooter[:]...) + return out +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go new file mode 100644 index 00000000..61757ce5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -0,0 +1,175 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/messageset" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc + var parent piface.MessageV1 + messageName := xd.ContainingMessage().FullName() + if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) + if mv, ok := mv.(unwrapper); ok { + t = reflect.TypeOf(mv.protoUnwrap()) + } + + // Check whether the message implements the legacy v1 Message interface. + mz := reflect.Zero(t).Interface() + if mz, ok := mz.(piface.MessageV1); ok { + parent = mz + } + } + + // Determine the v1 extension type, which is unfortunately not the same as + // the v2 ExtensionType.GoType. + extType := xi.goType + switch extType.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields + } + + // Reconstruct the legacy enum full name. + var enumName string + if xd.Kind() == pref.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + + // Derive the proto file that the extension was declared within. + var filename string + if fd := xd.ParentFile(); fd != nil { + filename = fd.Path() + } + + // For MessageSet extensions, the name used is the parent message. + name := xd.FullName() + if messageset.IsMessageSetExtension(xd) { + name = name.Parent() + } + + xi.ExtendedType = parent + xi.ExtensionType = reflect.Zero(extType).Interface() + xi.Field = int32(xd.Number()) + xi.Name = string(name) + xi.Tag = ptag.Marshal(xd, enumName) + xi.Filename = filename +} + +// initFromLegacy initializes an ExtensionInfo from +// the contents of the deprecated exported fields of the type. +func (xi *ExtensionInfo) initFromLegacy() { + // The v1 API returns "type incomplete" descriptors where only the + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ + name: pref.FullName(xi.Name), + number: pref.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. + var ed pref.EnumDescriptor + var md pref.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) + case pref.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. + var evs pref.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } + fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) + + // Construct a v2 ExtensionType. 
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 + xd.L0.FullName = pref.FullName(xi.Name) + xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L2.IsPacked = fd.L1.IsPacked + xd.L2.Default = fd.L1.Default + xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) + xd.L2.Enum = ed + xd.L2.Message = md + + // Derive real extension field name for MessageSets. + if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { + xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) + } + + tt := reflect.TypeOf(xi.ExtensionType) + if isOptional { + tt = tt.Elem() + } + xi.goType = tt + xi.desc = extensionTypeDescriptor{xd, xi} +} + +type placeholderExtension struct { + name pref.FullName + number pref.FieldNumber +} + +func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } +func (x placeholderExtension) Parent() pref.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() pref.Syntax { return 0 } +func (x placeholderExtension) Name() pref.Name { return x.name.Name() } +func (x placeholderExtension) FullName() pref.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() pref.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } +func (x placeholderExtension) Kind() pref.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() pref.Value { return pref.Value{} } +func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } +func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go new file mode 100644 index 00000000..9ab09108 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 +// on February 25th, 2016 has had a method to get the raw descriptor. +// Types that were not generated by protoc-gen-go or were generated prior +// to that version are not supported. +// +// The []byte returned is the encoded form of a FileDescriptorProto message +// compressed using GZIP. The []int is the path from the top-level file +// to the specific message or enum declaration. +type ( + enumV1 interface { + EnumDescriptor() ([]byte, []int) + } + messageV1 interface { + Descriptor() ([]byte, []int) + } +) + +var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor + +// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. +// +// This assumes that b is immutable and that b does not refer to part of a +// concatenated series of GZIP files (which would require shenanigans that +// rely on the concatenation properties of both protobufs and GZIP). +// File descriptors generated by protoc-gen-go do not rely on that property. +func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { + // Fast-path: check whether we already have a cached file descriptor. + if fd, ok := legacyFileDescCache.Load(&b[0]); ok { + return fd.(protoreflect.FileDescriptor) + } + + // Slow-path: decompress and unmarshal the file descriptor proto. + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + panic(err) + } + b2, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + + fd := filedesc.Builder{ + RawDescriptor: b2, + FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry + }.Build().File + if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { + return fd.(protoreflect.FileDescriptor) + } + return fd +} + +type resolverOnly struct { + reg *protoregistry.Files +} + +func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return r.reg.FindFileByPath(path) +} +func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return r.reg.FindDescriptorByName(name) +} +func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go new file mode 100644 index 00000000..06c68e11 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -0,0 +1,502 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descopts" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// legacyWrapMessage wraps v as a protoreflect.Message, +// where v must be a *struct kind and not implement the v2 API already. +func legacyWrapMessage(v reflect.Value) pref.Message { + typ := v.Type() + if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} + } + mt := legacyLoadMessageInfo(typ, "") + return mt.MessageOf(v.Interface()) +} + +var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + +// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, +// where t must be a *struct kind and not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) + } + + // Slow-path: derive message descriptor and initialize MessageInfo. + mi := &MessageInfo{ + Desc: legacyLoadMessageDesc(t, name), + GoReflectType: t, + } + + v := reflect.Zero(t).Interface() + if _, ok := v.(legacyMarshaler); ok { + mi.methods.Marshal = legacyMarshal + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + mi.methods.Flags |= piface.SupportMarshalDeterministic + } + if _, ok := v.(legacyUnmarshaler); ok { + mi.methods.Unmarshal = legacyUnmarshal + } + if _, ok := v.(legacyMerger); ok { + mi.methods.Merge = legacyMerge + } + + if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { + return mi.(*MessageInfo) + } + return mi +} + +var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor + +// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must be a *struct kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { + return legacyLoadMessageDesc(t, "") +} +func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. + if mi, ok := legacyMessageDescCache.Load(t); ok { + return mi.(pref.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. + mv := reflect.Zero(t).Interface() + if _, ok := mv.(pref.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) + if !ok { + return aberrantLoadMessageDesc(t, name) + } + + // If this is a dynamic message type where there isn't a 1-1 mapping between + // Go and protobuf types, calling the Descriptor method on the zero value of + // the message type isn't likely to work. If it panics, swallow the panic and + // continue as if the Descriptor method wasn't present. 
+ b, idxs := func() ([]byte, []int) { + defer func() { + recover() + }() + return mdV1.Descriptor() + }() + if b == nil { + return aberrantLoadMessageDesc(t, name) + } + + // If the Go type has no fields, then this might be a proto3 empty message + // from before the size cache was added. If there are any fields, check to + // see that at least one of them looks like something we generated. + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) + } + } + + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1:] { + md = md.Messages().Get(i) + } + if name != "" && md.FullName() != name { + panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) + } + if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { + return md.(protoreflect.MessageDescriptor) + } + return md +} + +var ( + aberrantMessageDescLock sync.Mutex + aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor +) + +// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must not implement protoreflect.ProtoMessage or messageV1. +// +// This is a best-effort derivation of the message descriptor using the protobuf +// tags on the struct fields. +func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { + aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) + } + return aberrantLoadMessageDescReentrant(t, name) +} +func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. + if md, ok := aberrantMessageDescCache[t]; ok { + return md + } + + // Slow-path: construct a descriptor from the Go struct type (best-effort). + // Cache the MessageDescriptor early on so that we can resolve internal + // cyclic references. + md := &filedesc.Message{L2: new(filedesc.MessageL2)} + md.L0.FullName = aberrantDeriveMessageName(t, name) + md.L0.ParentFile = filedesc.SurrogateProto2 + aberrantMessageDescCache[t] = md + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return md + } + + // Try to determine if the message is using proto3 by checking scalars. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + switch f.Type.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + md.L0.ParentFile = filedesc.SurrogateProto3 + } + for _, s := range strings.Split(tag, ",") { + if s == "proto3" { + md.L0.ParentFile = filedesc.SurrogateProto3 + } + } + } + } + + // Obtain a list of oneof wrapper types. 
+ var oneofWrappers []reflect.Type + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := t.MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } + } + } + } + } + + // Obtain a list of the extension ranges. + if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ + pref.FieldNumber(v.FieldByName("Start").Int()), + pref.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } + } + + // Derive the message fields by inspecting the struct fields. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + tagKey := f.Tag.Get("protobuf_key") + tagVal := f.Tag.Get("protobuf_val") + aberrantAppendField(md, f.Type, tag, tagKey, tagVal) + } + if tag := f.Tag.Get("protobuf_oneof"); tag != "" { + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] + od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L0.Parent = md + od.L0.Index = n + + for _, t := range oneofWrappers { + if t.Implements(f.Type) { + f := t.Elem().Field(0) + if tag := f.Tag.Get("protobuf"); tag != "" { + aberrantAppendField(md, f.Type, tag, "", "") + fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] + fd.L1.ContainingOneof = od + od.L1.Fields.List = append(od.L1.Fields.List, fd) + } + } + } + } + } + + return md +} + +func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { + name = pref.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { + return name + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return AberrantDeriveFullName(t) +} + +func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { + t := goType + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field) + + // Append field descriptor to the message. + n := len(md.L2.Fields.List) + md.L2.Fields.List = append(md.L2.Fields.List, *fd) + fd = &md.L2.Fields.List[n] + fd.L0.FullName = md.FullName().Append(fd.Name()) + fd.L0.ParentFile = md.L0.ParentFile + fd.L0.Parent = md + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.HasPacked { + fd.L1.Options = func() pref.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) + } + if fd.L1.HasPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + } + return opts.Interface() + } + } + + // Populate Enum and Message. 
+ if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } + if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { + case pref.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) + default: + if t.Kind() == reflect.Map { + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] + md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + + md2.L1.IsMapEntry = true + md2.L2.Options = func() pref.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() + } + + aberrantAppendField(md2, t.Key(), tagKey, "", "") + aberrantAppendField(md2, t.Elem(), tagVal, "", "") + + fd.L1.Message = md2 + break + } + fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") + } + } +} + +type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors +} + +func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +} + +// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +type legacyMarshaler interface { + Marshal() ([]byte, error) +} + +// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. +type legacyUnmarshaler interface { + Unmarshal([]byte) error +} + +// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. +type legacyMerger interface { + Merge(protoiface.MessageV1) +} + +var legacyProtoMethods = &piface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + Flags: piface.SupportMarshalDeterministic, +} + +func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { + return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) 
+ } + return piface.MarshalOutput{ + Buf: out, + }, err +} + +func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { + return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) +} + +func legacyMerge(in piface.MergeInput) piface.MergeOutput { + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if !ok { + return piface.MergeOutput{} + } + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +// aberrantMessageType implements MessageType for all types other than pointer-to-struct. +type aberrantMessageType struct { + t reflect.Type +} + +func (mt aberrantMessageType) New() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) Zero() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t +} +func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) +} + +// aberrantMessage implements Message for all types other than pointer-to-struct. +// +// When the underlying type implements legacyMarshaler or legacyUnmarshaler, +// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is +// not much that can be done with values of this type. +type aberrantMessage struct { + v reflect.Value +} + +func (m aberrantMessage) ProtoReflect() pref.Message { + return m +} + +func (m aberrantMessage) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) +} +func (m aberrantMessage) Type() pref.MessageType { + return aberrantMessageType{m.v.Type()} +} +func (m aberrantMessage) New() pref.Message { + return aberrantMessage{reflect.Zero(m.v.Type())} +} +func (m aberrantMessage) Interface() pref.ProtoMessage { + return m +} +func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +} +func (m aberrantMessage) Has(pref.FieldDescriptor) bool { + panic("invalid field descriptor") +} +func (m aberrantMessage) Clear(pref.FieldDescriptor) { + panic("invalid field descriptor") +} +func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { + panic("invalid field descriptor") +} +func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { + panic("invalid oneof descriptor") +} +func (m aberrantMessage) GetUnknown() pref.RawFields { + return nil +} +func (m aberrantMessage) SetUnknown(pref.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. +} +func (m aberrantMessage) IsValid() bool { + // An invalid message is a read-only, empty message. Since we don't know anything + // about the alleged contents of this message, we can't say with confidence that + // it is invalid in this sense. Therefore, report it as valid. 
+ return true +} +func (m aberrantMessage) ProtoMethods() *piface.Methods { + return legacyProtoMethods +} +func (m aberrantMessage) protoUnwrap() interface{} { + return m.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go new file mode 100644 index 00000000..cdc4267d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type mergeOptions struct{} + +func (o mergeOptions) Merge(dst, src proto.Message) { + proto.Merge(dst, src) +} + +// merge is protoreflect.Methods.Merge. +func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { + return piface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { + return piface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + mi.init() + if dst.IsNil() { + panic(fmt.Sprintf("invalid value: merging into nil message")) + } + if src.IsNil() { + return + } + for _, f := range mi.orderedCoderFields { + if f.funcs.merge == nil { + continue + } + sfptr := src.Apply(f.offset) + if f.isPointer && sfptr.Elem().IsNil() { + continue + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + } + if mi.extensionOffset.IsValid() { + sext := src.Apply(mi.extensionOffset).Extensions() + dext := dst.Apply(mi.extensionOffset).Extensions() + if *dext == nil { + *dext = make(map[int32]ExtensionField) + } + for num, sx := range *sext { + xt := sx.Type() + xi := getExtensionFieldInfo(xt) + if xi.funcs.merge == nil { + continue + } + dx := (*dext)[num] + var dv pref.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } + if !dv.IsValid() && xi.unmarshalNeedsValue { + dv = xt.New() + } + dv = xi.funcs.merge(dv, sx.Value(), opts) + dx.Set(sx.Type(), dv) + (*dext)[num] = dx + } + } + if mi.unknownOffset.IsValid() { + du := dst.Apply(mi.unknownOffset).Bytes() + su := src.Apply(mi.unknownOffset).Bytes() + if len(*su) > 0 { + *du = append(*du, *su...) + } + } +} + +func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return src +} + +func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +} + +func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + dstl.Append(srcl.Get(i)) + } + return dst +} + +func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db := append(emptyBuf[:], sb...) 
+ dstl.Append(pref.ValueOfBytes(db)) + } + return dst +} + +func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() + dstl.Append(pref.ValueOfMessage(dm)) + } + return dst +} + +func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst +} + +func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + if f.mi != nil { + if dst.Elem().IsNil() { + dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dst.Elem(), src.Elem(), opts) + } else { + dm := dst.AsValueOf(f.ft).Elem() + sm := src.AsValueOf(f.ft).Elem() + if dm.IsNil() { + dm.Set(reflect.New(f.ft.Elem())) + } + opts.Merge(asMessage(dm), asMessage(sm)) + } +} + +func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + for _, sp := range src.PointerSlice() { + dm := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(dm), sp, opts) + } else { + opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) + } + dst.AppendPointerSlice(pointerOfValue(dm)) + } +} + +func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) +} + +func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bytes() + if len(v) > 0 { + *dst.Bytes() = append(emptyBuf[:], v...) + } +} + +func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BytesSlice() + for _, v := range *src.BytesSlice() { + *ds = append(*ds, append(emptyBuf[:], v...)) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go new file mode 100644 index 00000000..8816c274 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import () + +func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bool() = *src.Bool() +} + +func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bool() + if v != false { + *dst.Bool() = v + } +} + +func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.BoolPtr() + if p != nil { + v := *p + *dst.BoolPtr() = &v + } +} + +func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BoolSlice() + ss := src.BoolSlice() + *ds = append(*ds, *ss...) +} + +func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int32() = *src.Int32() +} + +func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int32() + if v != 0 { + *dst.Int32() = v + } +} + +func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int32Ptr() + if p != nil { + v := *p + *dst.Int32Ptr() = &v + } +} + +func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int32Slice() + ss := src.Int32Slice() + *ds = append(*ds, *ss...) 
+} + +func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint32() = *src.Uint32() +} + +func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint32() + if v != 0 { + *dst.Uint32() = v + } +} + +func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint32Ptr() + if p != nil { + v := *p + *dst.Uint32Ptr() = &v + } +} + +func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint32Slice() + ss := src.Uint32Slice() + *ds = append(*ds, *ss...) +} + +func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int64() = *src.Int64() +} + +func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int64() + if v != 0 { + *dst.Int64() = v + } +} + +func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int64Ptr() + if p != nil { + v := *p + *dst.Int64Ptr() = &v + } +} + +func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int64Slice() + ss := src.Int64Slice() + *ds = append(*ds, *ss...) +} + +func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint64() = *src.Uint64() +} + +func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint64() + if v != 0 { + *dst.Uint64() = v + } +} + +func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint64Ptr() + if p != nil { + v := *p + *dst.Uint64Ptr() = &v + } +} + +func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint64Slice() + ss := src.Uint64Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float32() = *src.Float32() +} + +func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float32() + if v != 0 { + *dst.Float32() = v + } +} + +func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float32Ptr() + if p != nil { + v := *p + *dst.Float32Ptr() = &v + } +} + +func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float32Slice() + ss := src.Float32Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float64() = *src.Float64() +} + +func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float64() + if v != 0 { + *dst.Float64() = v + } +} + +func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float64Ptr() + if p != nil { + v := *p + *dst.Float64Ptr() = &v + } +} + +func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float64Slice() + ss := src.Float64Slice() + *ds = append(*ds, *ss...) +} + +func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.String() = *src.String() +} + +func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.String() + if v != "" { + *dst.String() = v + } +} + +func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.StringPtr() + if p != nil { + v := *p + *dst.StringPtr() = &v + } +} + +func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.StringSlice() + ss := src.StringSlice() + *ds = append(*ds, *ss...) 
+} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go new file mode 100644 index 00000000..c026a981 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -0,0 +1,215 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// MessageInfo provides protobuf related functionality for a given Go type +// that represents a message. A given instance of MessageInfo is tied to +// exactly one Go type, which must be a pointer to a struct type. +// +// The exported fields must be populated before any methods are called +// and cannot be mutated after set. +type MessageInfo struct { + // GoReflectType is the underlying message Go type and must be populated. + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. + Desc pref.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. + Exporter exporter + + // OneofWrappers is list of pointers to oneof wrapper struct types. + OneofWrappers []interface{} + + initMu sync.Mutex // protects all unexported fields + initDone uint32 + + reflectMessageInfo // for reflection implementation + coderMessageInfo // for fast-path method implementations +} + +// exporter is a function that returns a reference to the ith field of v, +// where v is a pointer to a struct. It returns nil if it does not support +// exporting the requested field (e.g., already exported). +type exporter func(v interface{}, i int) interface{} + +// getMessageInfo returns the MessageInfo for any message type that +// is generated by our implementation of protoc-gen-go (for v2 and on). +// If it is unable to obtain a MessageInfo, it returns nil. +func getMessageInfo(mt reflect.Type) *MessageInfo { + m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + if !ok { + return nil + } + mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) + if !ok { + return nil + } + return mr.ProtoMessageInfo() +} + +func (mi *MessageInfo) init() { + // This function is called in the hot path. Inline the sync.Once logic, + // since allocating a closure for Once.Do is expensive. + // Keep init small to ensure that it can be inlined. + if atomic.LoadUint32(&mi.initDone) == 0 { + mi.initOnce() + } +} + +func (mi *MessageInfo) initOnce() { + mi.initMu.Lock() + defer mi.initMu.Unlock() + if mi.initDone == 1 { + return + } + + t := mi.GoReflectType + if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("got %v, want *struct kind", t)) + } + t = t.Elem() + + si := mi.makeStructInfo(t) + mi.makeReflectFuncs(t, si) + mi.makeCoderMethods(t, si) + + atomic.StoreUint32(&mi.initDone, 1) +} + +// getPointer returns the pointer for a message, which should be of +// the type of the MessageInfo. If the message is of a different type, +// it returns ok==false. 
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi + case *messageReflectWrapper: + return m.pointer(), m.messageInfo() == mi + } + return pointer{}, false +} + +type ( + SizeCache = int32 + WeakFields = map[int32]protoreflect.ProtoMessage + UnknownFields = []byte + ExtensionFields = map[int32]ExtensionField +) + +var ( + sizecacheType = reflect.TypeOf(SizeCache(0)) + weakFieldsType = reflect.TypeOf(WeakFields(nil)) + unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) +) + +type structInfo struct { + sizecacheOffset offset + weakOffset offset + unknownOffset offset + extensionOffset offset + + fieldsByNumber map[pref.FieldNumber]reflect.StructField + oneofsByName map[pref.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]pref.FieldNumber + oneofWrappersByNumber map[pref.FieldNumber]reflect.Type +} + +func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + si := structInfo{ + sizecacheOffset: invalidOffset, + weakOffset: invalidOffset, + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + + fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, + oneofsByName: map[pref.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, + oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + } + +fieldLoop: + for i := 0; i < t.NumField(); i++ { + switch f := t.Field(i); f.Name { + case genid.SizeCache_goname, genid.SizeCacheA_goname: + if f.Type == sizecacheType { + si.sizecacheOffset = offsetOf(f, mi.Exporter) + } + case genid.WeakFields_goname, genid.WeakFieldsA_goname: + if f.Type == weakFieldsType { + si.weakOffset = offsetOf(f, mi.Exporter) + } + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: + if f.Type == unknownFieldsType { + si.unknownOffset = offsetOf(f, mi.Exporter) + } + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: + if f.Type == extensionFieldsType { + si.extensionOffset = offsetOf(f, mi.Exporter) + } + default: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.fieldsByNumber[pref.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { + si.oneofsByName[pref.Name(s)] = f + continue fieldLoop + } + } + } + + // Derive a mapping of oneof wrappers to fields. 
+ oneofWrappers := mi.OneofWrappers + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs + } + } + } + } + for _, v := range oneofWrappers { + tf := reflect.TypeOf(v).Elem() + f := tf.Field(0) + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.oneofWrappersByType[tf] = pref.FieldNumber(n) + si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + break + } + } + } + + return si +} + +func (mi *MessageInfo) New() protoreflect.Message { + return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) +} +func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +} +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go new file mode 100644 index 00000000..0f4b8db7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -0,0 +1,364 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type reflectMessageInfo struct { + fields map[pref.FieldNumber]*fieldInfo + oneofs map[pref.Name]*oneofInfo + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) + // It provides faster access to the fieldInfo, but may be incomplete. + denseFields []*fieldInfo + + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []interface{} // either *fieldInfo or *oneofInfo + + getUnknown func(pointer) pref.RawFields + setUnknown func(pointer, pref.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +} + +// makeReflectFuncs generates the set of functions to support reflection. +func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + mi.makeKnownFieldsFunc(si) + mi.makeUnknownFieldsFunc(t, si) + mi.makeExtensionFieldsFunc(t, si) +} + +// makeKnownFieldsFunc generates functions for operations that can be performed +// on each protobuf message field. It takes in a reflect.Type representing the +// Go struct and matches message fields with struct fields. +// +// This code assumes that the struct is well-formed and panics if there are +// any discrepancies. 
+func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields = map[pref.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = fieldInfoForMap(fd, fs, mi.Exporter) + case fd.IsList(): + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsWeak(): + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = fieldInfoForScalar(fd, fs, mi.Exporter) + } + mi.fields[fd.Number()] = &fi + } + + mi.oneofs = map[pref.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } +} + +func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + mi.getUnknown = func(pointer) pref.RawFields { return nil } + mi.setUnknown = func(pointer, pref.RawFields) { return } + if si.unknownOffset.IsValid() { + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) + return pref.RawFields(*rv.Interface().(*[]byte)) + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) + *rv.Interface().(*[]byte) = []byte(b) + } + } else { + mi.getUnknown = func(pointer) pref.RawFields { + return nil + } + mi.setUnknown = func(p pointer, _ pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + } + } +} + +func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { + if si.extensionOffset.IsValid() { + mi.extensionMap = func(p pointer) *extensionMap { + if p.IsNil() { + return (*extensionMap)(nil) + } + v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) + return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) + } + } else { + mi.extensionMap = func(pointer) *extensionMap { + return (*extensionMap)(nil) + } + } +} + +type extensionMap map[int32]ExtensionField + +func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() + v := x.Value() + if xd.IsList() && v.List().Len() == 0 { + continue + } + if !f(xd, v) { + return + } + } + } +} +func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { + if m == nil { + return false + } + xd := xt.TypeDescriptor() + x, ok := (*m)[int32(xd.Number())] + if !ok { + return false + } + switch { + case xd.IsList(): + return x.Value().List().Len() > 0 + case xd.IsMap(): + return x.Value().Map().Len() > 0 + case 
xd.Message() != nil: + return x.Value().Message().IsValid() + } + return true +} +func (m *extensionMap) Clear(xt pref.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) +} +func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + } + return xt.Zero() +} +func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { + xd := xt.TypeDescriptor() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + } + + if *m == nil { + *m = make(map[int32]ExtensionField) + } + var x ExtensionField + x.Set(xt, v) + (*m)[int32(xd.Number())] = x +} +func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + v := xt.New() + m.Set(xt, v) + return v +} + +// MessageState is a data structure that is nested as the first field in a +// concrete message. It provides a way to implement the ProtoReflect method +// in an allocation-free way without needing to have a shadow Go type generated +// for every message type. This technique only works using unsafe. +// +// +// Example generated code: +// +// type M struct { +// state protoimpl.MessageState +// +// Field1 int32 +// Field2 string +// Field3 *BarMessage +// ... +// } +// +// func (m *M) ProtoReflect() protoreflect.Message { +// mi := &file_fizz_buzz_proto_msgInfos[5] +// if protoimpl.UnsafeEnabled && m != nil { +// ms := protoimpl.X.MessageStateOf(Pointer(m)) +// if ms.LoadMessageInfo() == nil { +// ms.StoreMessageInfo(mi) +// } +// return ms +// } +// return mi.MessageOf(m) +// } +// +// The MessageState type holds a *MessageInfo, which must be atomically set to +// the message info associated with a given message instance. +// By unsafely converting a *M into a *MessageState, the MessageState object +// has access to all the information needed to implement protobuf reflection. +// It has access to the message info as its first field, and a pointer to the +// MessageState is identical to a pointer to the concrete message value. +// +// +// Requirements: +// • The type M must implement protoreflect.ProtoMessage. +// • The address of m must not be nil. +// • The address of m and the address of m.state must be equal, +// even though they are different Go types. +type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare + pragma.DoNotCopy + + atomicMessageInfo *MessageInfo +} + +type messageState MessageState + +var ( + _ pref.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) +) + +// messageDataType is a tuple of a pointer to the message data and +// a pointer to the message type. It is a generalized way of providing a +// reflective view over a message instance. The disadvantage of this approach +// is the need to allocate this tuple of 16B. 
+type messageDataType struct { + p pointer + mi *MessageInfo +} + +type ( + messageReflectWrapper messageDataType + messageIfaceWrapper messageDataType +) + +var ( + _ pref.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) +) + +// MessageOf returns a reflective view over a message. The input must be a +// pointer to a named Go struct. If the provided type has a ProtoReflect method, +// it must be implemented by calling this method. +func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { + // TODO: Switch the input to be an opaque Pointer. + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } + p := pointerOfIface(m) + if p.IsNil() { + return mi.nilMessage.Init(mi) + } + return &messageReflectWrapper{p, mi} +} + +func (m *messageReflectWrapper) pointer() pointer { return m.p } +func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } + +func (m *messageIfaceWrapper) ProtoReflect() pref.Message { + return (*messageReflectWrapper)(m) +} +func (m *messageIfaceWrapper) protoUnwrap() interface{} { + return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) +} + +// checkField verifies that the provided field descriptor is valid. +// Exactly one of the returned values is populated. +func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] + } else { + fi = mi.fields[n] + } + if fi != nil { + if fi.fieldDesc != fd { + if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { + panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) + } + panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) + } + return fi, nil + } + + if fd.IsExtension() { + if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { + // TODO: Should this be exact containing message descriptor match? + panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) + } + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } + xtd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return nil, xtd.Type() + } + panic(fmt.Sprintf("field %v is invalid", fd.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go new file mode 100644 index 00000000..23124a86 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -0,0 +1,466 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "sync" + + "google.golang.org/protobuf/internal/flags" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +type fieldInfo struct { + fieldDesc pref.FieldDescriptor + + // These fields are used for protobuf reflection support. 
+ has func(pointer) bool + clear func(pointer) + get func(pointer) pref.Value + set func(pointer, pref.Value) + mutable func(pointer) pref.Value + newMessage func() pref.Message + newField func() pref.Value +} + +func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) + } + if ot.Kind() != reflect.Struct { + panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) + } + if !reflect.PtrTo(ot).Implements(ft) { + panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) + } + conv := NewConverter(ot.Field(0).Type, fd) + isMessage := fd.Message() != nil + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + // NOTE: The logic below intentionally assumes that oneof fields are + // well-formatted. That is, the oneof interface never contains a + // typed nil pointer to one of the wrapper structs. + + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return false + } + return true + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot { + // NOTE: We intentionally don't check for rv.Elem().IsNil() + // so that (*OneofWrapperType)(nil) gets cleared to nil. + return + } + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return conv.Zero() + } + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, + mutable: func(p pointer) pref.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + if rv.IsNil() { + rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +var ( + nilBytes = reflect.ValueOf([]byte(nil)) + emptyBytes = reflect.ValueOf([]byte{}) +) + +func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + if nullable { + if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + } + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + return !rv.IsNil() + } + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + if rv.IsNil() { + return conv.Zero() + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + rv.Set(conv.GoValueOf(v)) + if isBytes && rv.Len() == 0 { + if nullable { + rv.Set(emptyBytes) // preserve presence + } else { + rv.Set(nilBytes) // do not preserve presence + } + } + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } + }) + } + + num := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + _, ok := p.Apply(weakOffset).WeakFields().get(num) + return ok + }, + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, + get: func(p pointer) pref.Value { + lazyInit() + if p.IsNil() { + return pref.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { + return pref.ValueOfMessage(messageType.Zero()) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + set: func(p pointer, v pref.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { + if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) + } + panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, + mutable: func(p pointer) pref.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) + if !ok { + m = messageType.New().Interface() + fs.set(num, m) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + newMessage: func() pref.Message { + lazyInit() + return messageType.New() + }, + newField: func() pref.Value { + lazyInit() + return pref.ValueOfMessage(messageType.New()) + 
}, + } +} + +func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return !rv.IsNil() + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, + mutable: func(p pointer) pref.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +type oneofInfo struct { + oneofDesc pref.OneofDescriptor + which func(pointer) pref.FieldNumber +} + +func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + return 0 + } + rv = rv.Elem() + if rv.IsNil() { + return 0 + } + return si.oneofWrappersByType[rv.Type().Elem()] + } + } + return oi +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go new file mode 100644 index 00000000..741d6e5b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
+ +package impl + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (m *messageState) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageState) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageState) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageState) Interface() protoreflect.ProtoMessage { + return m.protoUnwrap().(protoreflect.ProtoMessage) +} +func (m *messageState) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageState) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageState) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + 
string(m.Descriptor().FullName())) +} +func (m *messageState) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageState) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageState) IsValid() bool { + return !m.pointer().IsNil() +} + +func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageReflectWrapper) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageReflectWrapper) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { + if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { + return m + } + return (*messageIfaceWrapper)(m) +} +func (m *messageReflectWrapper) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := 
m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageReflectWrapper) IsValid() bool { + return !m.pointer().IsNil() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go new file mode 100644 index 00000000..67b4ede6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package impl + +import ( + "fmt" + "reflect" + "sync" +) + +const UnsafeEnabled = false + +// Pointer is an opaque pointer type. +type Pointer interface{} + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the field index into a struct. +type offset struct { + index int + export exporter +} + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + if len(f.Index) != 1 { + panic("embedded structs are not supported") + } + if f.PkgPath == "" { + return offset{index: f.Index[0]} // field is already exported + } + if x == nil { + panic("exporter must be provided for unexported field") + } + return offset{index: f.Index[0], export: x} +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f.index >= 0 } + +// invalidOffset is an invalid field offset. +var invalidOffset = offset{index: -1} + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset{index: 0} + +// pointer is an abstract representation of a pointer to a struct or field. +type pointer struct{ v reflect.Value } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointerOfIface(p) +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{v: v} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + return pointer{v: reflect.ValueOf(v)} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.v.IsNil() +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The current pointer must be pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if f.export != nil { + if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { + return pointer{v: v} + } + } + return pointer{v: p.v.Elem().Field(f.index).Addr()} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + if got := p.v.Type().Elem(); got != t { + panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) + } + return p.v +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } +func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } +func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } +func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } +func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } +func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } +func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } +func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } +func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } +func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } +func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } +func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } +func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } +func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } +func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } +func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } +func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } +func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } +func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } +func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } +func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } +func (p pointer) String() *string { return p.v.Interface().(*string) } +func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } +func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } +func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } +func (p pointer) Extensions() *map[int32]ExtensionField { + return p.v.Interface().(*map[int32]ExtensionField) +} + +func (p pointer) Elem() pointer { + return pointer{v: p.v.Elem()} +} + +// PointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) PointerSlice() []pointer { + // TODO: reconsider this + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + sp := p.v.Elem() + sp.Set(reflect.Append(sp, v.v)) +} + +// SetPointer sets *p to v. 
+func (p pointer) SetPointer(v pointer) { + p.v.Elem().Set(v.v) +} + +func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } +func (ms *messageState) pointer() pointer { panic("not supported") } +func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } + +type atomicNilMessage struct { + once sync.Once + m messageReflectWrapper +} + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + m.once.Do(func() { + m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) + m.m.mi = mi + }) + return &m.m +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go new file mode 100644 index 00000000..088aa85d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -0,0 +1,173 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const UnsafeEnabled = true + +// Pointer is an opaque pointer type. +type Pointer unsafe.Pointer + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the byte offset to the field from the start of the struct. +type offset uintptr + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + return offset(f.Offset) +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f != invalidOffset } + +// invalidOffset is an invalid field offset. +var invalidOffset = ^offset(0) + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset(0) + +// pointer is a pointer to a message struct or field. +type pointer struct{ p unsafe.Pointer } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointer{p: unsafe.Pointer(p)} +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + type ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } + return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.p == nil +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The pointer must be valid and pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if p.IsNil() { + panic("invalid nil pointer") + } + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
+ return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return (*bool)(p.p) } +func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } +func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } +func (p pointer) Int32() *int32 { return (*int32)(p.p) } +func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } +func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } +func (p pointer) Int64() *int64 { return (*int64)(p.p) } +func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } +func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } +func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } +func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } +func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } +func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } +func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } +func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } +func (p pointer) Float32() *float32 { return (*float32)(p.p) } +func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } +func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } +func (p pointer) Float64() *float64 { return (*float64)(p.p) } +func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } +func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } +func (p pointer) String() *string { return (*string)(p.p) } +func (p pointer) StringPtr() **string { return (**string)(p.p) } +func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } +func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } +func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } + +func (p pointer) Elem() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// PointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) PointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) +} + +// SetPointer sets *p to v. +func (p pointer) SetPointer(v pointer) { + *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) +} + +// Static check that MessageState does not exceed the size of a pointer. +const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) + +func (Export) MessageStateOf(p Pointer) *messageState { + // Super-tricky - see documentation on MessageState. + return (*messageState)(unsafe.Pointer(p)) +} +func (ms *messageState) pointer() pointer { + // Super-tricky - see documentation on MessageState. 
+ return pointer{p: unsafe.Pointer(ms)} +} +func (ms *messageState) messageInfo() *MessageInfo { + mi := ms.LoadMessageInfo() + if mi == nil { + panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") + } + return mi +} +func (ms *messageState) LoadMessageInfo() *MessageInfo { + return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) +} +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) +} + +type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + if p := atomic.LoadPointer(&m.p); p != nil { + return (*messageReflectWrapper)(p) + } + w := &messageReflectWrapper{mi: mi} + atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) + return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go new file mode 100644 index 00000000..08cfb605 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -0,0 +1,576 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "math/bits" + "reflect" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ValidationStatus is the result of validating the wire-format encoding of a message. +type ValidationStatus int + +const ( + // ValidationUnknown indicates that unmarshaling the message might succeed or fail. + // The validator was unable to render a judgement. + // + // The only causes of this status are an aberrant message type appearing somewhere + // in the message or a failure in the extension resolver. + ValidationUnknown ValidationStatus = iota + 1 + + // ValidationInvalid indicates that unmarshaling the message will fail. + ValidationInvalid + + // ValidationValid indicates that unmarshaling the message will succeed. + ValidationValid +) + +func (v ValidationStatus) String() string { + switch v { + case ValidationUnknown: + return "ValidationUnknown" + case ValidationInvalid: + return "ValidationInvalid" + case ValidationValid: + return "ValidationValid" + default: + return fmt.Sprintf("ValidationStatus(%d)", int(v)) + } +} + +// Validate determines whether the contents of the buffer are a valid wire encoding +// of the message type. +// +// This function is exposed for testing. 
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { + in.Resolver = preg.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { + out.Flags |= piface.UnmarshalInitialized + } + return out, st +} + +type validationInfo struct { + mi *MessageInfo + typ validationType + keyType, valType validationType + + // For non-required fields, requiredBit is 0. + // + // For required fields, requiredBit's nth bit is set, where n is a + // unique index in the range [0, MessageInfo.numRequiredFields). + // + // If there are more than 64 required fields, requiredBit is 0. + requiredBit uint64 +} + +type validationType uint8 + +const ( + validationTypeOther validationType = iota + validationTypeMessage + validationTypeGroup + validationTypeMap + validationTypeRepeatedVarint + validationTypeRepeatedFixed32 + validationTypeRepeatedFixed64 + validationTypeVarint + validationTypeFixed32 + validationTypeFixed64 + validationTypeBytes + validationTypeUTF8String + validationTypeMessageSetItem +) + +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + } + default: + vi = newValidationInfo(fd, ft) + } + if fd.Cardinality() == pref.Required { + // Avoid overflow. The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count + // of the required fields past 64. 
+ if mi.numRequiredFields < math.MaxUint8 { + mi.numRequiredFields++ + vi.requiredBit = 1 << (mi.numRequiredFields - 1) + } + } + return vi +} + +func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeRepeatedVarint + case protowire.Fixed32Type: + vi.typ = validationTypeRepeatedFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeRepeatedFixed64 + } + } + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { + case pref.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeVarint + case protowire.Fixed32Type: + vi.typ = validationTypeFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeFixed64 + case protowire.BytesType: + vi.typ = validationTypeBytes + } + } + } + return vi +} + +func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { + mi.init() + type validationState struct { + typ validationType + keyType, valType validationType + endGroup protowire.Number + mi *MessageInfo + tail []byte + requiredMask uint64 + } + + // Pre-allocate some slots to avoid repeated slice reallocation. + states := make([]validationState, 0, 16) + states = append(states, validationState{ + typ: validationTypeMessage, + mi: mi, + }) + if groupTag > 0 { + states[0].typ = validationTypeGroup + states[0].endGroup = groupTag + } + initialized := true + start := len(b) +State: + for len(states) > 0 { + st := &states[len(states)-1] + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, ValidationInvalid + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if st.endGroup == num { + goto PopState + } + return out, ValidationInvalid + } + var vi validationInfo + switch { + case st.typ == validationTypeMap: + switch num { + case genid.MapEntry_Key_field_number: + vi.typ = st.keyType + case genid.MapEntry_Value_field_number: + vi.typ = st.valType + vi.mi = st.mi + vi.requiredBit = 1 + } + case flags.ProtoLegacy && st.mi.isMessageSet: + switch num { + case messageset.FieldItem: + vi.typ = validationTypeMessageSetItem + } + default: + var f *coderFieldInfo + if int(num) < len(st.mi.denseCoderFields) { + f = st.mi.denseCoderFields[num] + } else { + f = st.mi.coderFields[num] + } + if f != nil { + vi = f.validation + if vi.typ == validationTypeMessage && vi.mi == nil { + // Probable weak field. + // + // TODO: Consider storing the results of this lookup somewhere + // rather than recomputing it on every validation. + fd := st.mi.Desc.Fields().ByNumber(num) + if fd == nil || !fd.IsWeak() { + break + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) + case preg.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown + } + } + break + } + // Possible extension field. + // + // TODO: We should return ValidationUnknown when: + // 1. The resolver is not frozen. (More extensions may be added to it.) + // 2. The resolver returns preg.NotFound. + // In this case, a type added to the resolver in the future could cause + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) + if err != nil && err != preg.NotFound { + return out, ValidationUnknown + } + if err == nil { + vi = getExtensionFieldInfo(xt).validation + } + } + if vi.requiredBit != 0 { + // Check that the field has a compatible wire type. + // We only need to consider non-repeated field types, + // since repeated fields (and maps) can never be required. 
+ ok := false + switch vi.typ { + case validationTypeVarint: + ok = wtyp == protowire.VarintType + case validationTypeFixed32: + ok = wtyp == protowire.Fixed32Type + case validationTypeFixed64: + ok = wtyp == protowire.Fixed64Type + case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: + ok = wtyp == protowire.BytesType + case validationTypeGroup: + ok = wtyp == protowire.StartGroupType + } + if ok { + st.requiredMask |= vi.requiredBit + } + } + + switch wtyp { + case protowire.VarintType: + if len(b) >= 10 { + switch { + case b[0] < 0x80: + b = b[1:] + case b[1] < 0x80: + b = b[2:] + case b[2] < 0x80: + b = b[3:] + case b[3] < 0x80: + b = b[4:] + case b[4] < 0x80: + b = b[5:] + case b[5] < 0x80: + b = b[6:] + case b[6] < 0x80: + b = b[7:] + case b[7] < 0x80: + b = b[8:] + case b[8] < 0x80: + b = b[9:] + case b[9] < 0x80 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } else { + switch { + case len(b) > 0 && b[0] < 0x80: + b = b[1:] + case len(b) > 1 && b[1] < 0x80: + b = b[2:] + case len(b) > 2 && b[2] < 0x80: + b = b[3:] + case len(b) > 3 && b[3] < 0x80: + b = b[4:] + case len(b) > 4 && b[4] < 0x80: + b = b[5:] + case len(b) > 5 && b[5] < 0x80: + b = b[6:] + case len(b) > 6 && b[6] < 0x80: + b = b[7:] + case len(b) > 7 && b[7] < 0x80: + b = b[8:] + case len(b) > 8 && b[8] < 0x80: + b = b[9:] + case len(b) > 9 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } + continue State + case protowire.BytesType: + var size uint64 + if len(b) >= 1 && b[0] < 0x80 { + size = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + size = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + size, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + if size > uint64(len(b)) { + return out, ValidationInvalid + } + v := b[:size] + b = b[size:] + switch vi.typ { + case validationTypeMessage: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + fallthrough + case validationTypeMap: + if vi.mi != nil { + vi.mi.init() + } + states = append(states, validationState{ + typ: vi.typ, + keyType: vi.keyType, + valType: vi.valType, + mi: vi.mi, + tail: b, + }) + b = v + continue State + case validationTypeRepeatedVarint: + // Packed field. + for len(v) > 0 { + _, n := protowire.ConsumeVarint(v) + if n < 0 { + return out, ValidationInvalid + } + v = v[n:] + } + case validationTypeRepeatedFixed32: + // Packed field. + if len(v)%4 != 0 { + return out, ValidationInvalid + } + case validationTypeRepeatedFixed64: + // Packed field. 
+ if len(v)%8 != 0 { + return out, ValidationInvalid + } + case validationTypeUTF8String: + if !utf8.Valid(v) { + return out, ValidationInvalid + } + } + case protowire.Fixed32Type: + if len(b) < 4 { + return out, ValidationInvalid + } + b = b[4:] + case protowire.Fixed64Type: + if len(b) < 8 { + return out, ValidationInvalid + } + b = b[8:] + case protowire.StartGroupType: + switch { + case vi.typ == validationTypeGroup: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + states = append(states, validationState{ + typ: validationTypeGroup, + mi: vi.mi, + endGroup: num, + }) + continue State + case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: + typeid, v, n, err := messageset.ConsumeFieldValue(b, false) + if err != nil { + return out, ValidationInvalid + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { + case err == preg.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown + default: + xvi := getExtensionFieldInfo(xt).validation + if xvi.mi != nil { + xvi.mi.init() + } + states = append(states, validationState{ + typ: xvi.typ, + mi: xvi.mi, + tail: b[n:], + }) + b = v + continue State + } + default: + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + default: + return out, ValidationInvalid + } + } + if st.endGroup != 0 { + return out, ValidationInvalid + } + if len(b) != 0 { + return out, ValidationInvalid + } + b = st.tail + PopState: + numRequiredFields := 0 + switch st.typ { + case validationTypeMessage, validationTypeGroup: + numRequiredFields = int(st.mi.numRequiredFields) + case validationTypeMap: + // If this is a map field with a message value that contains + // required fields, require that the value be present. + if st.mi != nil && st.mi.numRequiredFields > 0 { + numRequiredFields = 1 + } + } + // If there are more than 64 required fields, this check will + // always fail and we will report that the message is potentially + // uninitialized. + if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { + initialized = false + } + states = states[:len(states)-1] + } + out.n = start - len(b) + if initialized { + out.initialized = true + } + return out, ValidationValid +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go new file mode 100644 index 00000000..009cbefd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// weakFields adds methods to the exported WeakFields type for internal use. +// +// The exported type is an alias to an unnamed type, so methods can't be +// defined directly on it. 
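
The requiredMask / `bits.OnesCount64` bookkeeping used by the validator above can be shown in isolation. A rough sketch, with the field indices and required-field count made up for the example:

```go
package main

import (
	"fmt"
	"math/bits"
)

// requiredTracker sketches the bookkeeping above: required field i owns
// bit 1<<i, and a message is complete only when the popcount of the seen
// bits equals the number of required fields.
type requiredTracker struct {
	numRequired int
	seen        uint64
}

func (t *requiredTracker) markSeen(i int) {
	if i < 64 {
		t.seen |= 1 << uint(i)
	}
}

func (t *requiredTracker) initialized() bool {
	// With more than 64 required fields the mask cannot be trusted, so the
	// real validator simply reports "potentially uninitialized".
	if t.numRequired > 64 {
		return false
	}
	return bits.OnesCount64(t.seen) == t.numRequired
}

func main() {
	t := requiredTracker{numRequired: 3}
	t.markSeen(0)
	t.markSeen(2)
	fmt.Println(t.initialized()) // false: required field #1 never appeared
	t.markSeen(1)
	fmt.Println(t.initialized()) // true
}
```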
+type weakFields WeakFields + +func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok +} + +func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} + +func (w *weakFields) clear(num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { + _, ok := w[int32(num)] + return ok +} + +func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + return mt.Zero().Interface() +} + +func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + if mt != m.ProtoReflect().Type() { + panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) + } + } + if m == nil || !m.ProtoReflect().IsValid() { + delete(*w, int32(num)) + return + } + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go new file mode 100644 index 00000000..a3de1cf3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mapsort provides sorted access to maps. +package mapsort + +import ( + "sort" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Range iterates over every map entry in sorted key order, +// calling f for each key and value encountered. +func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { + var keys []protoreflect.MapKey + mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { + keys = append(keys, key) + return true + }) + sort.Slice(keys, func(i, j int) bool { + switch keyKind { + case protoreflect.BoolKind: + return !keys[i].Bool() && keys[j].Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return keys[i].Int() < keys[j].Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, + protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return keys[i].Uint() < keys[j].Uint() + case protoreflect.StringKind: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keyKind.String()) + } + }) + for _, key := range keys { + if !f(key, mapv.Get(key)) { + break + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go new file mode 100644 index 00000000..49dc4fcd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
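
mapsort.Range above exists because protoreflect map iteration, like Go's, has no defined order. A minimal plain-Go analogue of the same idea (not part of the vendored code), using an ordinary `map[string]int` rather than a `protoreflect.Map`:

```go
package main

import (
	"fmt"
	"sort"
)

// rangeSorted mimics mapsort.Range for an ordinary Go map: collect the
// keys, sort them, then visit entries in that order so iteration is
// deterministic even though Go's map order is randomized.
func rangeSorted(m map[string]int, f func(k string, v int) bool) {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		if !f(k, m[k]) {
			break
		}
	}
}

func main() {
	m := map[string]int{"b": 2, "c": 3, "a": 1}
	rangeSorted(m, func(k string, v int) bool {
		fmt.Println(k, v) // always a, b, c regardless of map ordering
		return true
	})
}
```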
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pragma provides types that can be embedded into a struct to +// statically enforce or prevent certain language properties. +package pragma + +import "sync" + +// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals. +type NoUnkeyedLiterals struct{} + +// DoNotImplement can be embedded in an interface to prevent trivial +// implementations of the interface. +// +// This is useful to prevent unauthorized implementations of an interface +// so that it can be extended in the future for any protobuf language changes. +type DoNotImplement interface{ ProtoInternal(DoNotImplement) } + +// DoNotCompare can be embedded in a struct to prevent comparability. +type DoNotCompare [0]func() + +// DoNotCopy can be embedded in a struct to help prevent shallow copies. +// This does not rely on a Go language feature, but rather a special case +// within the vet checker. +// +// See https://golang.org/issues/8005. +type DoNotCopy [0]sync.Mutex diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go new file mode 100644 index 00000000..d3d7f89a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/set/ints.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides simple set data structures for uint64s. +package set + +import "math/bits" + +// int64s represents a set of integers within the range of 0..63. +type int64s uint64 + +func (bs *int64s) Len() int { + return bits.OnesCount64(uint64(*bs)) +} +func (bs *int64s) Has(n uint64) bool { + return uint64(*bs)&(uint64(1)< 0 +} +func (bs *int64s) Set(n uint64) { + *(*uint64)(bs) |= uint64(1) << n +} +func (bs *int64s) Clear(n uint64) { + *(*uint64)(bs) &^= uint64(1) << n +} + +// Ints represents a set of integers within the range of 0..math.MaxUint64. +type Ints struct { + lo int64s + hi map[uint64]struct{} +} + +func (bs *Ints) Len() int { + return bs.lo.Len() + len(bs.hi) +} +func (bs *Ints) Has(n uint64) bool { + if n < 64 { + return bs.lo.Has(n) + } + _, ok := bs.hi[n] + return ok +} +func (bs *Ints) Set(n uint64) { + if n < 64 { + bs.lo.Set(n) + return + } + if bs.hi == nil { + bs.hi = make(map[uint64]struct{}) + } + bs.hi[n] = struct{}{} +} +func (bs *Ints) Clear(n uint64) { + if n < 64 { + bs.lo.Clear(n) + return + } + delete(bs.hi, n) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go new file mode 100644 index 00000000..0b74e765 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package strs provides string manipulation functionality specific to protobuf. +package strs + +import ( + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// EnforceUTF8 reports whether to enforce strict UTF-8 validation. 
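
The Ints type above keeps small members in a packed uint64 and spills larger ones into a lazily allocated map. A standalone sketch of that two-tier layout, assuming the conventional bit test `mask&(1<<n) != 0` for the packed half:

```go
package main

import "fmt"

// smallBigSet sketches the Ints layout above: values below 64 live in a
// single uint64 bitset, anything larger goes into a map.
type smallBigSet struct {
	lo uint64
	hi map[uint64]struct{}
}

func (s *smallBigSet) Set(n uint64) {
	if n < 64 {
		s.lo |= 1 << n
		return
	}
	if s.hi == nil {
		s.hi = make(map[uint64]struct{})
	}
	s.hi[n] = struct{}{}
}

func (s *smallBigSet) Has(n uint64) bool {
	if n < 64 {
		return s.lo&(1<<n) != 0 // standard bit test for the packed half
	}
	_, ok := s.hi[n]
	return ok
}

func main() {
	var s smallBigSet
	s.Set(3)
	s.Set(1000)
	fmt.Println(s.Has(3), s.Has(4), s.Has(1000)) // true false true
}
```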
+func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { + if flags.ProtoLegacy { + if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { + return fd.EnforceUTF8() + } + } + return fd.Syntax() == protoreflect.Proto3 +} + +// GoCamelCase camel-cases a protobuf name for use as a Go identifier. +// +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +func GoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +// GoSanitized converts a string to a valid Go identifier. +func GoSanitized(s string) string { + // Sanitize the input to the set of valid characters, + // which must be '_' or be in the Unicode L or N categories. + s = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' + }, s) + + // Prepend '_' in the event of a Go keyword conflict or if + // the identifier is invalid (does not start in the Unicode L category). + r, _ := utf8.DecodeRuneInString(s) + if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { + return "_" + s + } + return s +} + +// JSONCamelCase converts a snake_case identifier to a camelCase identifier, +// according to the protobuf JSON specification. +func JSONCamelCase(s string) string { + var b []byte + var wasUnderscore bool + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if c != '_' { + if wasUnderscore && isASCIILower(c) { + c -= 'a' - 'A' // convert to uppercase + } + b = append(b, c) + } + wasUnderscore = c == '_' + } + return string(b) +} + +// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, +// according to the protobuf JSON specification. +func JSONSnakeCase(s string) string { + var b []byte + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if isASCIIUpper(c) { + b = append(b, '_') + c += 'a' - 'A' // convert to lowercase + } + b = append(b, c) + } + return string(b) +} + +// MapEntryName derives the name of the map entry message given the field name. 
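
Because strs is an internal package, the JSON name mappings above cannot be imported directly from outside the module. The sketch below restates JSONCamelCase and JSONSnakeCase so their behavior can be seen on a sample identifier:

```go
package main

import "fmt"

// jsonCamelCase and jsonSnakeCase re-state the two JSON helpers above
// so they can be exercised in a standalone program.
func jsonCamelCase(s string) string {
	var b []byte
	var wasUnderscore bool
	for i := 0; i < len(s); i++ { // proto identifiers are ASCII
		c := s[i]
		if c != '_' {
			if wasUnderscore && 'a' <= c && c <= 'z' {
				c -= 'a' - 'A' // uppercase the letter after an underscore
			}
			b = append(b, c)
		}
		wasUnderscore = c == '_'
	}
	return string(b)
}

func jsonSnakeCase(s string) string {
	var b []byte
	for i := 0; i < len(s); i++ {
		c := s[i]
		if 'A' <= c && c <= 'Z' {
			b = append(b, '_')
			c += 'a' - 'A' // lowercase the letter and prefix an underscore
		}
		b = append(b, c)
	}
	return string(b)
}

func main() {
	fmt.Println(jsonCamelCase("foo_bar_baz")) // fooBarBaz
	fmt.Println(jsonSnakeCase("fooBarBaz"))   // foo_bar_baz
}
```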
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 +func MapEntryName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(c)) + } + } + b = append(b, "Entry"...) + return string(b) +} + +// EnumValueName derives the camel-cased enum value name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 +func EnumValueName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(unicode.ToLower(c))) + upperNext = false + } + } + return string(b) +} + +// TrimEnumPrefix trims the enum name prefix from an enum value name, +// where the prefix is all lowercase without underscores. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 +func TrimEnumPrefix(s, prefix string) string { + s0 := s // original input + for len(s) > 0 && len(prefix) > 0 { + if s[0] == '_' { + s = s[1:] + continue + } + if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { + return s0 // no prefix match + } + s, prefix = s[1:], prefix[1:] + } + if len(prefix) > 0 { + return s0 // no prefix match + } + s = strings.TrimLeft(s, "_") + if len(s) == 0 { + return s0 // avoid returning empty string + } + return s +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} +func isASCIIUpper(c byte) bool { + return 'A' <= c && c <= 'Z' +} +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go new file mode 100644 index 00000000..85e074c9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package strs + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} + +type Builder struct{} + +func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + return prefix.Append(name) +} + +func (*Builder) MakeString(b []byte) string { + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go new file mode 100644 index 00000000..2160c701 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package strs + +import ( + "unsafe" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. 
The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return pref.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go new file mode 100644 index 00000000..72cf770b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package version records versioning information about this module. +package version + +import ( + "fmt" + "strings" +) + +// These constants determine the current version of this module. +// +// +// For our release process, we enforce the following rules: +// * Tagged releases use a tag that is identical to String. +// * Tagged releases never reference a commit where the String +// contains "devel". +// * The set of all commits in this repository where String +// does not contain "devel" must have a unique String. +// +// +// Steps for tagging a new release: +// 1. Create a new CL. +// +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". +// +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. +// +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. +// +// 5. 
Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. +// +// 6. Tag a new version, where the tag is is the current String. +// +// 7. Write release notes for all notable changes +// between this release and the last release. +// +// 8. Create a new CL. +// +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. +const ( + Major = 1 + Minor = 25 + Patch = 0 + PreRelease = "" +) + +// String formats the version string for this module in semver format. +// +// Examples: +// v1.20.1 +// v1.21.0-rc.1 +func String() string { + v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) + if PreRelease != "" { + v += "-" + PreRelease + + // TODO: Add metadata about the commit or build hash. + // See https://golang.org/issue/29814 + // See https://golang.org/issue/33533 + var metadata string + if strings.Contains(PreRelease, "devel") && metadata != "" { + v += "+" + metadata + } + } + return v +} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go new file mode 100644 index 00000000..3e9a6a2f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/checkinit.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// CheckInitialized returns an error if any required fields in m are not set. +func CheckInitialized(m Message) error { + // Treat a nil message interface as an "untyped" empty message, + // which we assume to have no required fields. + if m == nil { + return nil + } + + return checkInitialized(m.ProtoReflect()) +} + +// CheckInitialized returns an error if any required fields in m are not set. 
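
proto.CheckInitialized above is the exported entry point for the required-field check. A small usage sketch; `durationpb` is only a convenient stand-in message (proto3, so it has no required fields and the check trivially passes), whereas a proto2 message with an unset required field would yield an error naming that field:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(0)
	// CheckInitialized walks the message (and nested messages) and reports
	// the first required field that is not set.
	if err := proto.CheckInitialized(d); err != nil {
		fmt.Println("not initialized:", err)
		return
	}
	fmt.Println("all required fields set")
}
```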
+func checkInitialized(m protoreflect.Message) error { + if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { + _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ + Message: m, + }) + return err + } + return checkInitializedSlow(m) +} + +func checkInitializedSlow(m protoreflect.Message) error { + md := m.Descriptor() + fds := md.Fields() + for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { + fd := fds.ByNumber(nums.Get(i)) + if !m.Has(fd) { + return errors.RequiredNotSet(string(fd.FullName())) + } + } + var err error + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + if fd.Message() == nil { + return true + } + for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { + err = checkInitialized(list.Get(i).Message()) + } + case fd.IsMap(): + if fd.MapValue().Message() == nil { + return true + } + v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { + err = checkInitialized(v.Message()) + return err == nil + }) + default: + if fd.Message() == nil { + return true + } + err = checkInitialized(v.Message()) + } + return err == nil + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go new file mode 100644 index 00000000..42fc5195 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -0,0 +1,274 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// UnmarshalOptions configures the unmarshaler. +// +// Example usage: +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Merge merges the input into the destination message. + // The default behavior is to always reset the message before unmarshaling, + // unless Merge is specified. + Merge bool + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return an error if there are any missing required fields. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// Unmarshal parses the wire-format message in b and places the result in m. +func Unmarshal(b []byte, m Message) error { + _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + return err +} + +// Unmarshal parses the wire-format message in b and places the result in m. 
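
The UnmarshalOptions doc comment above suggests the options form of Unmarshal. A short round-trip sketch using the public API, with `wrapperspb.StringValue` as an arbitrary stand-in message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Marshal a small well-known message, then decode it with explicit
	// options. DiscardUnknown drops unrecognized fields instead of
	// preserving them in the unknown-field set.
	src := wrapperspb.String("hello")
	b, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	dst := &wrapperspb.StringValue{}
	opts := proto.UnmarshalOptions{DiscardUnknown: true}
	if err := opts.Unmarshal(b, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetValue()) // hello
}
```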
+func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + _, err := o.unmarshal(b, m.ProtoReflect()) + return err +} + +// UnmarshalState parses a wire-format message and places the result in m. +// +// This method permits fine-grained control over the unmarshaler. +// Most users should use Unmarshal instead. +func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + return o.unmarshal(in.Buf, in.Message) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if !o.Merge { + Reset(m.Interface()) + } + allowPartial := o.AllowPartial + o.Merge = true + o.AllowPartial = true + methods := protoMethods(m) + if methods != nil && methods.Unmarshal != nil && + !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { + in := protoiface.UnmarshalInput{ + Message: m, + Buf: b, + Resolver: o.Resolver, + } + if o.DiscardUnknown { + in.Flags |= protoiface.UnmarshalDiscardUnknown + } + out, err = methods.Unmarshal(in) + } else { + err = o.unmarshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { + return out, nil + } + return out, checkInitialized(m) +} + +func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { + _, err := o.unmarshal(b, m) + return err +} + +func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { + md := m.Descriptor() + if messageset.IsMessageSet(md) { + return o.unmarshalMessageSet(b, m) + } + fields := md.Fields() + for len(b) > 0 { + // Parse the tag (field number and wire type). + num, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return protowire.ParseError(tagLen) + } + if num > protowire.MaxValidNumber { + return errors.New("invalid field number") + } + + // Find the field descriptor for this field number. + fd := fields.ByNumber(num) + if fd == nil && md.ExtensionRanges().Has(num) { + extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + if extType != nil { + fd = extType.TypeDescriptor() + } + } + var err error + if fd == nil { + err = errUnknown + } else if flags.ProtoLegacy { + if fd.IsWeak() && fd.Message().IsPlaceholder() { + err = errUnknown // weak referent is not linked in + } + } + + // Parse the field value. 
+ var valLen int + switch { + case err != nil: + case fd.IsList(): + valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) + case fd.IsMap(): + valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) + default: + valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) + } + if err != nil { + if err != errUnknown { + return err + } + valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) + if valLen < 0 { + return protowire.ParseError(valLen) + } + if !o.DiscardUnknown { + m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) + } + } + b = b[tagLen+valLen:] + } + return nil +} + +func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { + v, n, err := o.unmarshalScalar(b, wtyp, fd) + if err != nil { + return 0, err + } + switch fd.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + m2 := m.Mutable(fd).Message() + if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { + return n, err + } + default: + // Non-message scalars replace the previous value. + m.Set(fd, v) + } + return n, nil +} + +func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if wtyp != protowire.BytesType { + return 0, errUnknown + } + b, n = protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + var ( + keyField = fd.MapKey() + valField = fd.MapValue() + key protoreflect.Value + val protoreflect.Value + haveKey bool + haveVal bool + ) + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + val = mapv.NewValue() + } + // Map entries are represented as a two-element message with fields + // containing the key and value. + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return 0, errors.New("invalid field number") + } + b = b[n:] + err = errUnknown + switch num { + case genid.MapEntry_Key_field_number: + key, n, err = o.unmarshalScalar(b, wtyp, keyField) + if err != nil { + break + } + haveKey = true + case genid.MapEntry_Value_field_number: + var v protoreflect.Value + v, n, err = o.unmarshalScalar(b, wtyp, valField) + if err != nil { + break + } + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { + return 0, err + } + default: + val = v + } + haveVal = true + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, protowire.ParseError(n) + } + } else if err != nil { + return 0, err + } + b = b[n:] + } + // Every map entry should have entries for key and value, but this is not strictly required. + if !haveKey { + key = keyField.Default() + } + if !haveVal { + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + default: + val = valField.Default() + } + } + mapv.Set(key.MapKey(), val) + return n, nil +} + +// errUnknown is used internally to indicate fields which should be added +// to the unknown field set of a message. It is never returned from an exported +// function. 
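
unmarshalMap above treats each map entry as a tiny two-field message (key = field 1, value = field 2) wrapped in a length-delimited field. The sketch below hand-assembles one such entry with the public protowire package; the parent field number 7 is made up for the example:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// A map<string,int32> entry is just a small nested message:
	// field 1 holds the key, field 2 holds the value.
	var entry []byte
	entry = protowire.AppendTag(entry, 1, protowire.BytesType)
	entry = protowire.AppendString(entry, "answer")
	entry = protowire.AppendTag(entry, 2, protowire.VarintType)
	entry = protowire.AppendVarint(entry, 42)

	// The enclosing map field (field 7 here, purely illustrative) wraps
	// each entry as a length-delimited value.
	var parent []byte
	parent = protowire.AppendTag(parent, 7, protowire.BytesType)
	parent = protowire.AppendBytes(parent, entry)

	fmt.Printf("entry:        % x\nparent field: % x\n", entry, parent)
}
```

This is exactly the shape the map-entry loop above expects when it dispatches on `genid.MapEntry_Key_field_number` and `genid.MapEntry_Value_field_number`.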
+var errUnknown = errors.New("BUG: internal error (unknown)") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go new file mode 100644 index 00000000..d6dc904d --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -0,0 +1,603 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unmarshalScalar decodes a value of the given kind. +// +// Message values are decoded into a []byte which aliases the input data. +func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil + case protoreflect.EnumKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil + case protoreflect.Int32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Sint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil + case protoreflect.Uint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.Int64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Sint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil + case protoreflect.Uint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.Sfixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Fixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := 
protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.FloatKind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil + case protoreflect.Sfixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Fixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.DoubleKind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) + } + return protoreflect.ValueOfString(string(v)), n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(v), n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(v), n, nil + default: + return val, 0, errUnknown + } +} + +func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + return n, nil + case protoreflect.EnumKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + 
return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + return n, nil + case protoreflect.Int32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Sint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + return n, nil + case protoreflect.Uint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.Int64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Sint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + return n, nil + case protoreflect.Uint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + 
list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.Sfixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Fixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.FloatKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + return n, nil + case protoreflect.Sfixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Fixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.DoubleKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + 
list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + return n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return 0, errors.InvalidUTF8(string(fd.FullName())) + } + list.Append(protoreflect.ValueOfString(string(v))) + return n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + return n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return 0, protowire.ParseError(n) + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + default: + return 0, errUnknown + } +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go new file mode 100644 index 00000000..c52d8c4a --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functions operating on protocol buffer messages. +// +// For documentation on protocol buffers in general, see: +// +// https://developers.google.com/protocol-buffers +// +// For a tutorial on using protocol buffers with Go, see: +// +// https://developers.google.com/protocol-buffers/docs/gotutorial +// +// For a guide to generated Go protocol buffer code, see: +// +// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// +// +// Binary serialization +// +// This package contains functions to convert to and from the wire format, +// an efficient binary serialization of protocol buffers. +// +// • Size reports the size of a message in the wire format. +// +// • Marshal converts a message to the wire format. +// The MarshalOptions type provides more control over wire marshaling. +// +// • Unmarshal converts a message from the wire format. +// The UnmarshalOptions type provides more control over wire unmarshaling. +// +// +// Basic message operations +// +// • Clone makes a deep copy of a message. +// +// • Merge merges the content of a message into another. +// +// • Equal compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// "google.golang.org/protobuf/testing/protocmp". +// +// • Reset clears the content of a message. 
+// +// • CheckInitialized reports whether all required fields in a message are set. +// +// +// Optional scalar constructors +// +// The API for some generated messages represents optional scalar fields +// as pointers to a value. For example, an optional string field has the +// Go type *string. +// +// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. +// +// Generated enum types usually have an Enum method which performs the +// same operation. +// +// Optional scalar fields are only supported in proto2. +// +// +// Extension accessors +// +// • HasExtension, GetExtension, SetExtension, and ClearExtension +// access extension field values in a protocol buffer message. +// +// Extension fields are only supported in proto2. +// +// +// Related packages +// +// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to +// and from JSON. +// +// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to +// and from the text format. +// +// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a +// reflection interface for protocol buffer data types. +// +// • Package "google.golang.org/protobuf/testing/protocmp" provides features +// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" +// package. +// +// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. +// +// This module contains additional packages for more specialized use cases. +// Consult the individual package documentation for details. +package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go new file mode 100644 index 00000000..7b47a118 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -0,0 +1,346 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// MarshalOptions configures the marshaler. +// +// Example usage: +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return an error if there are any missing required fields. + AllowPartial bool + + // Deterministic controls whether the same message will always be + // serialized to the same bytes within the same binary. + // + // Setting this option guarantees that repeated serialization of + // the same message will return the same bytes, and that different + // processes of the same binary (which may be executing on different + // machines) will serialize equal messages to the same bytes. + // It has no effect on the resulting size of the encoded message compared + // to a non-deterministic marshal. 
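The doc.go overview above lists the package's main entry points (Marshal, Unmarshal, Size, the basic message operations, and the optional scalar constructors). As an illustrative sketch only, and assuming a hypothetical protoc-generated message type examplepb.Person (not part of this repository), a wire-format round trip looks like this:

```go
// Illustrative sketch only: examplepb is a hypothetical protoc-gen-go package.
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	examplepb "example.com/internal/gen/examplepb" // hypothetical generated code
)

func main() {
	in := &examplepb.Person{
		Name: proto.String("Ada"), // optional scalar constructors from this package
		Id:   proto.Int32(42),
	}

	// Marshal produces the wire-format encoding.
	b, err := proto.Marshal(in)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}

	// Unmarshal parses the wire format back into a fresh message.
	out := &examplepb.Person{}
	if err := proto.Unmarshal(b, out); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}

	fmt.Println(proto.Equal(in, out)) // true for a lossless round trip
}
```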
+ // + // Note that the deterministic serialization is NOT canonical across + // languages. It is not guaranteed to remain stable over time. It is + // unstable across different builds with schema changes due to unknown + // fields. Users who need canonical serialization (e.g., persistent + // storage in a canonical form, fingerprinting, etc.) must define + // their own canonicalization specification and implement their own + // serializer rather than relying on this API. + // + // If deterministic serialization is requested, map entries will be + // sorted by keys in lexographical order. This is an implementation + // detail and subject to change. + Deterministic bool + + // UseCachedSize indicates that the result of a previous Size call + // may be reused. + // + // Setting this option asserts that: + // + // 1. Size has previously been called on this message with identical + // options (except for UseCachedSize itself). + // + // 2. The message and all its submessages have not changed in any + // way since the Size call. + // + // If either of these invariants is violated, + // the results are undefined and may include panics or corrupted output. + // + // Implementations MAY take this option into account to provide + // better performance, but there is no guarantee that they will do so. + // There is absolutely no guarantee that Size followed by Marshal with + // UseCachedSize set will perform equivalently to Marshal alone. + UseCachedSize bool +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// Marshal returns the wire-format encoding of m. +func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := o.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// emptyBytesForMessage returns a nil buffer if and only if m is invalid, +// otherwise it returns a non-nil empty buffer. +// +// This is to assist the edge-case where user-code does the following: +// m1.OptionalBytes, _ = proto.Marshal(m2) +// where they expect the proto2 "optional_bytes" field to be populated +// if any only if m2 is a valid message. +func emptyBytesForMessage(m Message) []byte { + if m == nil || !m.ProtoReflect().IsValid() { + return nil + } + return emptyBuf[:] +} + +// MarshalAppend appends the wire-format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to append. + if m == nil { + return b, nil + } + + out, err := o.marshal(b, m.ProtoReflect()) + return out.Buf, err +} + +// MarshalState returns the wire-format encoding of a message. +// +// This method permits fine-grained control over the marshaler. +// Most users should use Marshal instead. +func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + return o.marshal(in.Buf, in.Message) +} + +// marshal is a centralized function that all marshal operations go through. 
+// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { + allowPartial := o.AllowPartial + o.AllowPartial = true + if methods := protoMethods(m); methods != nil && methods.Marshal != nil && + !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { + in := protoiface.MarshalInput{ + Message: m, + Buf: b, + } + if o.Deterministic { + in.Flags |= protoiface.MarshalDeterministic + } + if o.UseCachedSize { + in.Flags |= protoiface.MarshalUseCachedSize + } + if methods.Size != nil { + sout := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: in.Flags, + }) + if cap(b) < len(b)+sout.Size { + in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) + copy(in.Buf, b) + } + in.Flags |= protoiface.MarshalUseCachedSize + } + out, err = methods.Marshal(in) + } else { + out.Buf, err = o.marshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial { + return out, nil + } + return out, checkInitialized(m) +} + +func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { + out, err := o.marshal(b, m) + return out.Buf, err +} + +// growcap scales up the capacity of a slice. +// +// Given a slice with a current capacity of oldcap and a desired +// capacity of wantcap, growcap returns a new capacity >= wantcap. +// +// The algorithm is mostly identical to the one used by append as of Go 1.14. +func growcap(oldcap, wantcap int) (newcap int) { + if wantcap > oldcap*2 { + newcap = wantcap + } else if oldcap < 1024 { + // The Go 1.14 runtime takes this case when len(s) < 1024, + // not when cap(s) < 1024. The difference doesn't seem + // significant here. + newcap = oldcap * 2 + } else { + newcap = oldcap + for 0 < newcap && newcap < wantcap { + newcap += newcap / 4 + } + if newcap <= 0 { + newcap = wantcap + } + } + return newcap +} + +func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.marshalMessageSet(b, m) + } + // There are many choices for what order we visit fields in. The default one here + // is chosen for reasonable efficiency and simplicity given the protoreflect API. + // It is not deterministic, since Message.Range does not return fields in any + // defined order. + // + // When using deterministic serialization, we sort the known fields. + var err error + o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + b = append(b, m.GetUnknown()...) + return b, nil +} + +// rangeFields visits fields in a defined order when deterministic serialization is enabled. 
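As a brief sketch of the Deterministic option documented on MarshalOptions above, again assuming a hypothetical generated message (here examplepb.Settings with a map<string, string> field), stable output can be requested like so:

```go
// Sketch only; examplepb.Settings is a hypothetical generated message.
func encodeStable(m *examplepb.Settings) ([]byte, error) {
	// With Deterministic set, map entries are emitted in sorted key order,
	// so encoding the same message repeatedly within one binary yields
	// byte-identical output.
	return proto.MarshalOptions{Deterministic: true}.Marshal(m)
}
```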
+func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !o.Deterministic { + m.Range(f) + return + } + var fds []protoreflect.FieldDescriptor + m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + fds = append(fds, fd) + return true + }) + sort.Slice(fds, func(a, b int) bool { + return fieldsort.Less(fds[a], fds[b]) + }) + for _, fd := range fds { + if !f(fd, m.Get(fd)) { + break + } + } +} + +func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + switch { + case fd.IsList(): + return o.marshalList(b, fd, value.List()) + case fd.IsMap(): + return o.marshalMap(b, fd, value.Map()) + default: + b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) + return o.marshalSingular(b, fd, value) + } +} + +func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { + if fd.IsPacked() && list.Len() > 0 { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + b, pos := appendSpeculativeLength(b) + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + b = finishSpeculativeLength(b, pos) + return b, nil + } + + kind := fd.Kind() + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + return b, nil +} + +func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { + keyf := fd.MapKey() + valf := fd.MapValue() + var err error + o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + var pos int + b, pos = appendSpeculativeLength(b) + + b, err = o.marshalField(b, keyf, key.Value()) + if err != nil { + return false + } + b, err = o.marshalField(b, valf, value) + if err != nil { + return false + } + b = finishSpeculativeLength(b, pos) + return true + }) + return b, err +} + +func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { + if !o.Deterministic { + mapv.Range(f) + return + } + mapsort.Range(mapv, kind, f) +} + +// When encoding length-prefixed fields, we speculatively set aside some number of bytes +// for the length, encode the data, and then encode the length (shifting the data if necessary +// to make room). +const speculativeLength = 1 + +func appendSpeculativeLength(b []byte) ([]byte, int) { + pos := len(b) + b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) + return b, pos +} + +func finishSpeculativeLength(b []byte, pos int) []byte { + mlen := len(b) - pos - speculativeLength + msiz := protowire.SizeVarint(uint64(mlen)) + if msiz != speculativeLength { + for i := 0; i < msiz-speculativeLength; i++ { + b = append(b, 0) + } + copy(b[pos+msiz:], b[pos+speculativeLength:]) + b = b[:pos+msiz+mlen] + } + protowire.AppendVarint(b[:pos], uint64(mlen)) + return b +} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go new file mode 100644 index 00000000..185dacfb --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} + +func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + b = protowire.AppendVarint(b, uint64(v.Enum())) + case protoreflect.Int32Kind: + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + b = protowire.AppendVarint(b, uint64(v.Int())) + case protoreflect.Sint64Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + b = protowire.AppendVarint(b, v.Uint()) + case protoreflect.Sfixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Int())) + case protoreflect.Fixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Uint())) + case protoreflect.FloatKind: + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + case protoreflect.Sfixed64Kind: + b = protowire.AppendFixed64(b, uint64(v.Int())) + case protoreflect.Fixed64Kind: + b = protowire.AppendFixed64(b, v.Uint()) + case protoreflect.DoubleKind: + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { + return b, errors.InvalidUTF8(string(fd.FullName())) + } + b = protowire.AppendString(b, v.String()) + case protoreflect.BytesKind: + b = protowire.AppendBytes(b, v.Bytes()) + case protoreflect.MessageKind: + var pos int + var err error + b, pos = appendSpeculativeLength(b) + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = finishSpeculativeLength(b, pos) + case protoreflect.GroupKind: + var err error + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) + default: + return b, errors.New("invalid kind %v", fd.Kind()) + } + return b, 
nil +} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go new file mode 100644 index 00000000..10902bd8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -0,0 +1,154 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. If either of the top-level +// messages are invalid, then Equal reports true only if both are invalid. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } + return equalMessage(mx, my) +} + +// equalMessage compares two messages. +func equalMessage(mx, my pref.Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalField(fd, vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalField compares two fields. +func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + default: + return equalValue(fd, x, y) + } +} + +// equalMap compares two maps. +func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k pref.MapKey, vx pref.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +// equalList compares two lists. +func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalValue compares two singular values. 
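A short usage sketch of the Equal semantics defined above, assuming a hypothetical examplepb.Measurement message with an optional double field named value:

```go
// Sketch only; examplepb.Measurement is hypothetical.
x := &examplepb.Measurement{Value: proto.Float64(math.NaN())}
y := &examplepb.Measurement{Value: proto.Float64(math.NaN())}

fmt.Println(proto.Equal(x, y))   // true: NaN values are treated as equal
fmt.Println(proto.Equal(x, nil)) // false: only two nil messages compare equal
```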
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.Message() != nil: + return equalMessage(x.Message(), y.Message()) + case fd.Kind() == pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + fx := x.Float() + fy := y.Float() + if math.IsNaN(fx) || math.IsNaN(fy) { + return math.IsNaN(fx) && math.IsNaN(fy) + } + return fx == fy + default: + return x.Interface() == y.Interface() + } +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y pref.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[pref.FieldNumber]pref.RawFields) + my := make(map[pref.FieldNumber]pref.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go new file mode 100644 index 00000000..5f293cda --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// HasExtension reports whether an extension field is populated. +// It returns false if m is invalid or if xt does not extend m. +func HasExtension(m Message, xt protoreflect.ExtensionType) bool { + // Treat nil message interface as an empty message; no populated fields. + if m == nil { + return false + } + + // As a special-case, we reports invalid or mismatching descriptors + // as always not being populated (since they aren't). + if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + return false + } + + return m.ProtoReflect().Has(xt.TypeDescriptor()) +} + +// ClearExtension clears an extension field such that subsequent +// HasExtension calls return false. +// It panics if m is invalid or if xt does not extend m. +func ClearExtension(m Message, xt protoreflect.ExtensionType) { + m.ProtoReflect().Clear(xt.TypeDescriptor()) +} + +// GetExtension retrieves the value for an extension field. +// If the field is unpopulated, it returns the default value for +// scalars and an immutable, empty value for lists or messages. +// It panics if xt does not extend m. +func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { + // Treat nil message interface as an empty message; return the default. + if m == nil { + return xt.InterfaceOf(xt.Zero()) + } + + return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) +} + +// SetExtension stores the value of an extension field. +// It panics if m is invalid, xt does not extend m, or if type of v +// is invalid for the specified extension field. +func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { + xd := xt.TypeDescriptor() + pv := xt.ValueOf(v) + + // Specially treat an invalid list, map, or message as clear. 
+ isValid := true + switch { + case xd.IsList(): + isValid = pv.List().IsValid() + case xd.IsMap(): + isValid = pv.Map().IsValid() + case xd.Message() != nil: + isValid = pv.Message().IsValid() + } + if !isValid { + m.ProtoReflect().Clear(xd) + return + } + + m.ProtoReflect().Set(xd, pv) +} + +// RangeExtensions iterates over every populated extension field in m in an +// undefined order, calling f for each extension type and value encountered. +// It returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current extension field. +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { + // Treat nil message interface as an empty message; nothing to range over. + if m == nil { + return + } + + m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() + vi := xt.InterfaceOf(v) + return f(xt, vi) + } + return true + }) +} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go new file mode 100644 index 00000000..d761ab33 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -0,0 +1,139 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Merge merges src into dst, which must be a message with the same descriptor. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +// +// It is semantically equivalent to unmarshaling the encoded form of src +// into dst with the UnmarshalOptions.Merge option specified. +func Merge(dst, src Message) { + // TODO: Should nil src be treated as semantically equivalent to a + // untyped, read-only, empty message? What about a nil dst? + + dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() + if dstMsg.Descriptor() != srcMsg.Descriptor() { + if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) + } + panic("descriptor mismatch") + } + mergeOptions{}.mergeMessage(dstMsg, srcMsg) +} + +// Clone returns a deep copy of m. +// If the top-level message is invalid, it returns an invalid message as well. +func Clone(m Message) Message { + // NOTE: Most usages of Clone assume the following properties: + // t := reflect.TypeOf(m) + // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) + // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) + // + // Embedding protobuf messages breaks this since the parent type will have + // a forwarded ProtoReflect method, but the Interface method will return + // the underlying embedded message type. 
+ if m == nil { + return nil + } + src := m.ProtoReflect() + if !src.IsValid() { + return src.Type().Zero().Interface() + } + dst := src.New() + mergeOptions{}.mergeMessage(dst, src) + return dst.Interface() +} + +// mergeOptions provides a namespace for merge functions, and can be +// exported in the future if we add user-visible merge options. +type mergeOptions struct{} + +func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { + methods := protoMethods(dst) + if methods != nil && methods.Merge != nil { + in := protoiface.MergeInput{ + Destination: dst, + Source: src, + } + out := methods.Merge(in) + if out.Flags&protoiface.MergeComplete != 0 { + return + } + } + + if !dst.IsValid() { + panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) + } + + src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + o.mergeList(dst.Mutable(fd).List(), v.List(), fd) + case fd.IsMap(): + o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) + case fd.Message() != nil: + o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(fd, o.cloneBytes(v)) + default: + dst.Set(fd, v) + } + return true + }) + + if len(src.GetUnknown()) > 0 { + dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) + } +} + +func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { + // Merge semantics appends to the end of the existing list. + for i, n := 0, src.Len(); i < n; i++ { + switch v := src.Get(i); { + case fd.Message() != nil: + dstv := dst.NewElement() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Append(dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Append(o.cloneBytes(v)) + default: + dst.Append(v) + } + } +} + +func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { + // Merge semantics replaces, rather than merges into existing entries. + src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + switch { + case fd.Message() != nil: + dstv := dst.NewValue() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Set(k, dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(k, o.cloneBytes(v)) + default: + dst.Set(k, v) + } + return true + }) +} + +func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { + return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) +} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go new file mode 100644 index 00000000..1d692c3a --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -0,0 +1,88 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
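A small sketch of the Merge and Clone semantics implemented above, assuming a hypothetical examplepb.Profile message with a repeated string field tags:

```go
// Sketch only; examplepb.Profile is hypothetical.
dst := &examplepb.Profile{Tags: []string{"a"}}
src := &examplepb.Profile{Tags: []string{"b", "c"}}

proto.Merge(dst, src)
fmt.Println(dst.Tags) // [a b c]: repeated fields are appended, not replaced

cp := proto.Clone(dst).(*examplepb.Profile) // deep copy
cp.Tags[0] = "z"
fmt.Println(dst.Tags[0]) // still "a": the copy shares no mutable state
```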
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) { + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += messageset.SizeField(fd.Number()) + size += protowire.SizeTag(messageset.FieldMessage) + size += protowire.SizeBytes(o.size(v.Message())) + return true + }) + size += messageset.SizeUnknown(m.GetUnknown()) + return size +} + +func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + var err error + o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalMessageSetField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + return messageset.AppendUnknown(b, m.GetUnknown()) +} + +func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + b = messageset.AppendFieldStart(b, fd.Number()) + b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) + b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + b, err := o.marshalMessage(b, value.Message()) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error { + if !flags.ProtoLegacy { + return errors.New("no support for message_set_wire_format") + } + return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { + err := o.unmarshalMessageSetField(m, num, v) + if err == errUnknown { + unknown := m.GetUnknown() + unknown = protowire.AppendTag(unknown, num, protowire.BytesType) + unknown = protowire.AppendBytes(unknown, v) + m.SetUnknown(unknown) + return nil + } + return err + }) +} + +func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error { + md := m.Descriptor() + if !md.ExtensionRanges().Has(num) { + return errUnknown + } + xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err == protoregistry.NotFound { + return errUnknown + } + if err != nil { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + xd := xt.TypeDescriptor() + if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go new file mode 100644 index 00000000..ca14b09c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Message is the top-level interface that all messages must implement. +// It provides access to a reflective view of a message. 
+// Any implementation of this interface may be used with all functions in the +// protobuf module that accept a Message, except where otherwise specified. +// +// This is the v2 interface definition for protobuf messages. +// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// +// To convert a v1 message to a v2 message, +// use "github.com/golang/protobuf/proto".MessageV2. +// To convert a v2 message to a v1 message, +// use "github.com/golang/protobuf/proto".MessageV1. +type Message = protoreflect.ProtoMessage + +// Error matches all errors produced by packages in the protobuf module. +// +// That is, errors.Is(err, Error) reports whether an error is produced +// by this module. +var Error error + +func init() { + Error = errors.Error +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go new file mode 100644 index 00000000..d8dd604f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build !protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = true + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return m.ProtoMethods() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go new file mode 100644 index 00000000..b103d432 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = false + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go new file mode 100644 index 00000000..3d7f8943 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/reset.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Reset clears every field in the message. +// The resulting message shares no observable memory with its previous state +// other than the memory for the message itself. +func Reset(m Message) { + if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { + mr.Reset() + return + } + resetMessage(m.ProtoReflect()) +} + +func resetMessage(m protoreflect.Message) { + if !m.IsValid() { + panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) + } + + // Clear all known fields. + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + m.Clear(fds.Get(i)) + } + + // Clear extension fields. 
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + m.Clear(fd) + return true + }) + + // Clear unknown fields. + m.SetUnknown(nil) +} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go new file mode 100644 index 00000000..554b9c6c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -0,0 +1,97 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + return MarshalOptions{}.Size(m) +} + +// Size returns the size in bytes of the wire-format encoding of m. +func (o MarshalOptions) Size(m Message) int { + // Treat a nil message interface as an empty message; nothing to output. + if m == nil { + return 0 + } + + return o.size(m.ProtoReflect()) +} + +// size is a centralized function that all size operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for size that do not go through this. +func (o MarshalOptions) size(m protoreflect.Message) (size int) { + methods := protoMethods(m) + if methods != nil && methods.Size != nil { + out := methods.Size(protoiface.SizeInput{ + Message: m, + }) + return out.Size + } + if methods != nil && methods.Marshal != nil { + // This is not efficient, but we don't have any choice. + // This case is mainly used for legacy types with a Marshal method. 
+ out, _ := methods.Marshal(protoiface.MarshalInput{ + Message: m, + }) + return len(out.Buf) + } + return o.sizeMessageSlow(m) +} + +func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.sizeMessageSet(m) + } + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += o.sizeField(fd, v) + return true + }) + size += len(m.GetUnknown()) + return size +} + +func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { + num := fd.Number() + switch { + case fd.IsList(): + return o.sizeList(num, fd, value.List()) + case fd.IsMap(): + return o.sizeMap(num, fd, value.Map()) + default: + return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value) + } +} + +func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + if fd.IsPacked() && list.Len() > 0 { + content := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + content += o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return protowire.SizeTag(num) + protowire.SizeBytes(content) + } + + for i, llen := 0, list.Len(); i < llen; i++ { + size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return size +} + +func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { + size += protowire.SizeTag(num) + size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) + return true + }) + return size +} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go new file mode 100644 index 00000000..3cf61a82 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size_gen.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
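As a usage sketch of Size from size.go above, relative to Marshal (hypothetical examplepb.Person again):

```go
// Sketch only. Size reports the encoded length without building the encoding.
m := &examplepb.Person{Name: proto.String("Ada")}

n := proto.Size(m)
b, _ := proto.Marshal(m)
fmt.Println(n == len(b)) // true: Size matches the wire-format length
```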
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { + switch kind { + case protoreflect.BoolKind: + return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + return protowire.SizeVarint(uint64(v.Enum())) + case protoreflect.Int32Kind: + return protowire.SizeVarint(uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + return protowire.SizeVarint(uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + return protowire.SizeVarint(uint64(v.Int())) + case protoreflect.Sint64Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + return protowire.SizeVarint(v.Uint()) + case protoreflect.Sfixed32Kind: + return protowire.SizeFixed32() + case protoreflect.Fixed32Kind: + return protowire.SizeFixed32() + case protoreflect.FloatKind: + return protowire.SizeFixed32() + case protoreflect.Sfixed64Kind: + return protowire.SizeFixed64() + case protoreflect.Fixed64Kind: + return protowire.SizeFixed64() + case protoreflect.DoubleKind: + return protowire.SizeFixed64() + case protoreflect.StringKind: + return protowire.SizeBytes(len(v.String())) + case protoreflect.BytesKind: + return protowire.SizeBytes(len(v.Bytes())) + case protoreflect.MessageKind: + return protowire.SizeBytes(o.size(v.Message())) + case protoreflect.GroupKind: + return protowire.SizeGroup(num, o.size(v.Message())) + default: + return 0 + } +} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go new file mode 100644 index 00000000..653b12c3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrappers.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 00000000..37f254d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,275 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. 
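A minimal sketch of NewFile as declared above, constructing a FileDescriptor from a hand-built FileDescriptorProto that has no imports, so a nil resolver is sufficient (uses google.golang.org/protobuf/types/descriptorpb together with the proto package):

```go
// Sketch only: a self-contained proto3 file containing one empty message.
fdp := &descriptorpb.FileDescriptorProto{
	Name:    proto.String("example.proto"),
	Package: proto.String("example"),
	Syntax:  proto.String("proto3"),
	MessageType: []*descriptorpb.DescriptorProto{
		{Name: proto.String("Empty")},
	},
}

fd, err := protodesc.NewFile(fdp, nil) // a nil Resolver is treated as empty
if err != nil {
	log.Fatal(err)
}
fmt.Println(fd.FullName()) // example
```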
When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. + f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 00000000..673a230e --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s)) 
+ } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.JSONName.Init(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options 
= func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.JSONName.Init(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 00000000..cebb36cd --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
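The initializer helpers above (and the resolver code that follows) back the package's public entry point, `protodesc.NewFile`, which turns a `google.protobuf.FileDescriptorProto` into a `protoreflect.FileDescriptor`. A minimal sketch of that entry point, using a hand-built descriptor proto; the `example.proto` / `Greeting` names are illustrative only and not part of this changeset:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A tiny proto3 file described directly as a FileDescriptorProto.
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
		MessageType: []*descriptorpb.DescriptorProto{{
			Name: proto.String("Greeting"),
			Field: []*descriptorpb.FieldDescriptorProto{{
				Name:   proto.String("text"),
				Number: proto.Int32(1),
				Label:  descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
				Type:   descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
			}},
		}},
	}

	// NewFile walks the proto, initializing declarations and resolving
	// references much like the desc_init.go / desc_resolve.go code above.
	fd, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Messages().Get(0).Fields().Get(0).FullName())
	// Output: example.Greeting.text
}
```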
+ +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. +type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != 
nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 00000000..2d5fa993 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,371 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case 
(*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. +func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. 
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 00000000..00d35e02 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,242 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. 
+func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Package: proto.String(string(file.Package())), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + p.JsonName = proto.String(field.JSONName()) + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. 
+func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. +func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go new file mode 100644 index 00000000..6be5d16e --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -0,0 +1,77 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "google.golang.org/protobuf/internal/pragma" +) + +// The following types are used by the fast-path Message.ProtoMethods method. +// +// To avoid polluting the public protoreflect API with types used only by +// low-level implementations, the canonical definitions of these types are +// in the runtime/protoiface package. The definitions here and in protoiface +// must be kept in sync. +type ( + methods = struct { + pragma.NoUnkeyedLiterals + Flags supportFlags + Size func(sizeInput) sizeOutput + Marshal func(marshalInput) (marshalOutput, error) + Unmarshal func(unmarshalInput) (unmarshalOutput, error) + Merge func(mergeInput) mergeOutput + CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + } + supportFlags = uint64 + sizeInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Flags uint8 + } + sizeOutput = struct { + pragma.NoUnkeyedLiterals + Size int + } + marshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + } + marshalOutput = struct { + pragma.NoUnkeyedLiterals + Buf []byte + } + unmarshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + Resolver interface { + FindExtensionByName(field FullName) (ExtensionType, error) + FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) + } + } + unmarshalOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + mergeInput = struct { + pragma.NoUnkeyedLiterals + Source Message + Destination Message + } + mergeOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + checkInitializedInput = struct { + pragma.NoUnkeyedLiterals + Message Message + } + checkInitializedOutput = struct { + pragma.NoUnkeyedLiterals + } +) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go new file mode 100644 index 00000000..dd85915b --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -0,0 +1,504 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoreflect provides interfaces to dynamically manipulate messages. +// +// This package includes type descriptors which describe the structure of types +// defined in proto source files and value interfaces which provide the +// ability to examine and manipulate the contents of messages. +// +// +// Protocol Buffer Descriptors +// +// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// are immutable objects that represent protobuf type information. +// They are wrappers around the messages declared in descriptor.proto. +// Protobuf descriptors alone lack any information regarding Go types. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Descriptor and ProtoReflect.Descriptor accessors respectively +// return the protobuf descriptor for the values. +// +// The protobuf descriptor interfaces are not meant to be implemented by +// user code since they might need to be extended in the future to support +// additions to the protobuf language. +// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// google.protobuf.DescriptorProto messages and protobuf descriptors. 
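The package documentation above notes that generated messages expose their protobuf descriptors via `ProtoReflect().Descriptor()`, and that the protodesc package converts between descriptors and `google.protobuf.DescriptorProto` messages. A small sketch of that round trip, assuming the generated `durationpb` well-known type is available (it is not introduced by this changeset):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Descriptor() on the reflective view returns the protobuf descriptor
	// for the generated message type.
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName()) // google.protobuf.Duration

	// protodesc converts the parent file descriptor back into its
	// google.protobuf.FileDescriptorProto form.
	fdp := protodesc.ToFileDescriptorProto(md.ParentFile())
	fmt.Println(fdp.GetName()) // google/protobuf/duration.proto
}
```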
+// +// +// Go Type Descriptors +// +// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// a concrete Go type that represents the associated protobuf descriptor. +// There is commonly a one-to-one relationship between protobuf descriptors and +// Go type descriptors, but it can potentially be a one-to-many relationship. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Type and ProtoReflect.Type accessors respectively +// return the protobuf descriptor for the values. +// +// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// create Go type descriptors from protobuf descriptors. +// +// +// Value Interfaces +// +// The Enum and Message interfaces provide a reflective view over an +// enum or message instance. For enums, it provides the ability to retrieve +// the enum value number for any concrete enum type. For messages, it provides +// the ability to access or manipulate fields of the message. +// +// To convert a proto.Message to a protoreflect.Message, use the +// former's ProtoReflect method. Since the ProtoReflect method is new to the +// v2 message interface, it may not be present on older message implementations. +// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// to obtain a reflective view on older messages. +// +// +// Relationships +// +// The following diagrams demonstrate the relationships between +// various types declared in this package. +// +// +// ┌───────────────────────────────────┐ +// V │ +// ┌────────────── New(n) ─────────────┐ │ +// │ │ │ +// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ +// │ │ V V │ V │ +// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ +// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ +// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ +// Λ Λ │ │ +// │ └─── Descriptor() ──┘ │ +// │ │ +// └────────────────── Type() ───────┘ +// +// • An EnumType describes a concrete Go enum type. +// It has an EnumDescriptor and can construct an Enum instance. +// +// • An EnumDescriptor describes an abstract protobuf enum type. +// +// • An Enum is a concrete enum instance. Generated enums implement Enum. +// +// +// ┌──────────────── New() ─────────────────┐ +// │ │ +// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +// │ │ V V │ V +// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ +// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ +// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ +// Λ Λ │ │ Λ │ +// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ +// │ │ +// └─────────────────── Type() ─────────┘ +// +// • A MessageType describes a concrete Go message type. +// It has a MessageDescriptor and can construct a Message instance. +// +// • A MessageDescriptor describes an abstract protobuf message type. +// +// • A Message is a concrete message instance. Generated messages implement +// ProtoMessage, which can convert to/from a Message. +// +// +// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ +// │ V │ V +// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ +// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ +// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ +// Λ │ │ Λ │ Λ +// └─────── Type() ───────┘ │ └─── may implement ────┘ │ +// │ │ +// └────── implements ────────┘ +// +// • An ExtensionType describes a concrete Go implementation of an extension. 
+// It has an ExtensionTypeDescriptor and can convert to/from +// abstract Values and Go values. +// +// • An ExtensionTypeDescriptor is an ExtensionDescriptor +// which also has an ExtensionType. +// +// • An ExtensionDescriptor describes an abstract protobuf extension field and +// may not always be an ExtensionTypeDescriptor. +package protoreflect + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/pragma" +) + +type doNotImplement pragma.DoNotImplement + +// ProtoMessage is the top-level interface that all proto messages implement. +// This is declared in the protoreflect package to avoid a cyclic dependency; +// use the proto.Message type instead, which aliases this type. +type ProtoMessage interface{ ProtoReflect() Message } + +// Syntax is the language version of the proto file. +type Syntax syntax + +type syntax int8 // keep exact type opaque as the int type may change + +const ( + Proto2 Syntax = 2 + Proto3 Syntax = 3 +) + +// IsValid reports whether the syntax is valid. +func (s Syntax) IsValid() bool { + switch s { + case Proto2, Proto3: + return true + default: + return false + } +} + +// String returns s as a proto source identifier (e.g., "proto2"). +func (s Syntax) String() string { + switch s { + case Proto2: + return "proto2" + case Proto3: + return "proto3" + default: + return fmt.Sprintf("", s) + } +} + +// GoString returns s as a Go source identifier (e.g., "Proto2"). +func (s Syntax) GoString() string { + switch s { + case Proto2: + return "Proto2" + case Proto3: + return "Proto3" + default: + return fmt.Sprintf("Syntax(%d)", s) + } +} + +// Cardinality determines whether a field is optional, required, or repeated. +type Cardinality cardinality + +type cardinality int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Cardinality enumeration. +const ( + Optional Cardinality = 1 // appears zero or one times + Required Cardinality = 2 // appears exactly one time; invalid with Proto3 + Repeated Cardinality = 3 // appears zero or more times +) + +// IsValid reports whether the cardinality is valid. +func (c Cardinality) IsValid() bool { + switch c { + case Optional, Required, Repeated: + return true + default: + return false + } +} + +// String returns c as a proto source identifier (e.g., "optional"). +func (c Cardinality) String() string { + switch c { + case Optional: + return "optional" + case Required: + return "required" + case Repeated: + return "repeated" + default: + return fmt.Sprintf("", c) + } +} + +// GoString returns c as a Go source identifier (e.g., "Optional"). +func (c Cardinality) GoString() string { + switch c { + case Optional: + return "Optional" + case Required: + return "Required" + case Repeated: + return "Repeated" + default: + return fmt.Sprintf("Cardinality(%d)", c) + } +} + +// Kind indicates the basic proto kind of a field. +type Kind kind + +type kind int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Field.Kind enumeration. 
+const ( + BoolKind Kind = 8 + EnumKind Kind = 14 + Int32Kind Kind = 5 + Sint32Kind Kind = 17 + Uint32Kind Kind = 13 + Int64Kind Kind = 3 + Sint64Kind Kind = 18 + Uint64Kind Kind = 4 + Sfixed32Kind Kind = 15 + Fixed32Kind Kind = 7 + FloatKind Kind = 2 + Sfixed64Kind Kind = 16 + Fixed64Kind Kind = 6 + DoubleKind Kind = 1 + StringKind Kind = 9 + BytesKind Kind = 12 + MessageKind Kind = 11 + GroupKind Kind = 10 +) + +// IsValid reports whether the kind is valid. +func (k Kind) IsValid() bool { + switch k { + case BoolKind, EnumKind, + Int32Kind, Sint32Kind, Uint32Kind, + Int64Kind, Sint64Kind, Uint64Kind, + Sfixed32Kind, Fixed32Kind, FloatKind, + Sfixed64Kind, Fixed64Kind, DoubleKind, + StringKind, BytesKind, MessageKind, GroupKind: + return true + default: + return false + } +} + +// String returns k as a proto source identifier (e.g., "bool"). +func (k Kind) String() string { + switch k { + case BoolKind: + return "bool" + case EnumKind: + return "enum" + case Int32Kind: + return "int32" + case Sint32Kind: + return "sint32" + case Uint32Kind: + return "uint32" + case Int64Kind: + return "int64" + case Sint64Kind: + return "sint64" + case Uint64Kind: + return "uint64" + case Sfixed32Kind: + return "sfixed32" + case Fixed32Kind: + return "fixed32" + case FloatKind: + return "float" + case Sfixed64Kind: + return "sfixed64" + case Fixed64Kind: + return "fixed64" + case DoubleKind: + return "double" + case StringKind: + return "string" + case BytesKind: + return "bytes" + case MessageKind: + return "message" + case GroupKind: + return "group" + default: + return fmt.Sprintf("", k) + } +} + +// GoString returns k as a Go source identifier (e.g., "BoolKind"). +func (k Kind) GoString() string { + switch k { + case BoolKind: + return "BoolKind" + case EnumKind: + return "EnumKind" + case Int32Kind: + return "Int32Kind" + case Sint32Kind: + return "Sint32Kind" + case Uint32Kind: + return "Uint32Kind" + case Int64Kind: + return "Int64Kind" + case Sint64Kind: + return "Sint64Kind" + case Uint64Kind: + return "Uint64Kind" + case Sfixed32Kind: + return "Sfixed32Kind" + case Fixed32Kind: + return "Fixed32Kind" + case FloatKind: + return "FloatKind" + case Sfixed64Kind: + return "Sfixed64Kind" + case Fixed64Kind: + return "Fixed64Kind" + case DoubleKind: + return "DoubleKind" + case StringKind: + return "StringKind" + case BytesKind: + return "BytesKind" + case MessageKind: + return "MessageKind" + case GroupKind: + return "GroupKind" + default: + return fmt.Sprintf("Kind(%d)", k) + } +} + +// FieldNumber is the field number in a message. +type FieldNumber = protowire.Number + +// FieldNumbers represent a list of field numbers. +type FieldNumbers interface { + // Len reports the number of fields in the list. + Len() int + // Get returns the ith field number. It panics if out of bounds. + Get(i int) FieldNumber + // Has reports whether n is within the list of fields. + Has(n FieldNumber) bool + + doNotImplement +} + +// FieldRanges represent a list of field number ranges. +type FieldRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]FieldNumber // start inclusive; end exclusive + // Has reports whether n is within any of the ranges. + Has(n FieldNumber) bool + + doNotImplement +} + +// EnumNumber is the numeric value for an enum. +type EnumNumber int32 + +// EnumRanges represent a list of enum number ranges. +type EnumRanges interface { + // Len reports the number of ranges in the list. 
+ Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]EnumNumber // start inclusive; end inclusive + // Has reports whether n is within any of the ranges. + Has(n EnumNumber) bool + + doNotImplement +} + +// Name is the short name for a proto declaration. This is not the name +// as used in Go source code, which might not be identical to the proto name. +type Name string // e.g., "Kind" + +// IsValid reports whether s is a syntactically valid name. +// An empty name is invalid. +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) +} + +// Names represent a list of names. +type Names interface { + // Len reports the number of names in the list. + Len() int + // Get returns the ith name. It panics if out of bounds. + Get(i int) Name + // Has reports whether s matches any names in the list. + Has(s Name) bool + + doNotImplement +} + +// FullName is a qualified name that uniquely identifies a proto declaration. +// A qualified name is the concatenation of the proto package along with the +// fully-declared name (i.e., name of parent preceding the name of the child), +// with a '.' delimiter placed between each Name. +// +// This should not have any leading or trailing dots. +type FullName string // e.g., "google.protobuf.Field.Kind" + +// IsValid reports whether s is a syntactically valid full name. +// An empty full name is invalid. +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') +} + +// Name returns the short name, which is the last identifier segment. +// A single segment FullName is the Name itself. +func (n FullName) Name() Name { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return Name(n[i+1:]) + } + return Name(n) +} + +// Parent returns the full name with the trailing identifier removed. +// A single segment FullName has no parent. +func (n FullName) Parent() FullName { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return n[:i] + } + return "" +} + +// Append returns the qualified name appended with the provided short name. +// +// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid +func (n FullName) Append(s Name) FullName { + if n == "" { + return FullName(s) + } + return n + "." + FullName(s) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go new file mode 100644 index 00000000..32ea3d98 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// SourceLocations is a list of source locations. +type SourceLocations interface { + // Len reports the number of source locations in the proto file. + Len() int + // Get returns the ith SourceLocation. It panics if out of bounds. 
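//
// Illustrative sketch of FullName handling (assuming the
// "google.golang.org/protobuf/reflect/protoreflect" import path):
//
//     n := protoreflect.FullName("google.protobuf.Field.Kind")
//     n.Name()                         // "Kind"
//     n.Parent()                       // "google.protobuf.Field"
//     n == n.Parent().Append(n.Name()) // true (the documented invariant)
//     protoreflect.FullName("google..protobuf").IsValid() // false
//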
+ Get(int) SourceLocation + + doNotImplement + + // TODO: Add ByPath and ByDescriptor helper methods. +} + +// SourceLocation describes a source location and +// corresponds with the google.protobuf.SourceCodeInfo.Location message. +type SourceLocation struct { + // Path is the path to the declaration from the root file descriptor. + // The contents of this slice must not be mutated. + Path SourcePath + + // StartLine and StartColumn are the zero-indexed starting location + // in the source file for the declaration. + StartLine, StartColumn int + // EndLine and EndColumn are the zero-indexed ending location + // in the source file for the declaration. + // In the descriptor.proto, the end line may be omitted if it is identical + // to the start line. Here, it is always populated. + EndLine, EndColumn int + + // LeadingDetachedComments are the leading detached comments + // for the declaration. The contents of this slice must not be mutated. + LeadingDetachedComments []string + // LeadingComments is the leading attached comment for the declaration. + LeadingComments string + // TrailingComments is the trailing attached comment for the declaration. + TrailingComments string +} + +// SourcePath identifies part of a file descriptor for a source location. +// The SourcePath is a sequence of either field numbers or indexes into +// a repeated field that form a path starting from the root file descriptor. +// +// See google.protobuf.SourceCodeInfo.Location.path. +type SourcePath []int32 + +// TODO: Add SourcePath.String method to pretty-print the path. For example: +// ".message_type[6].nested_type[15].field[3]" diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go new file mode 100644 index 00000000..5be14a72 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -0,0 +1,631 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// Descriptor provides a set of accessors that are common to every descriptor. +// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, +// but provides efficient lookup and immutability. +// +// Each descriptor is comparable. Equality implies that the two types are +// exactly identical. However, it is possible for the same semantically +// identical proto type to be represented by multiple type descriptors. +// +// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// If t1 == t2, then the types are definitely equal and all accessors return +// the same information. However, if t1 != t2, then it is still possible that +// they still represent the same proto type (e.g., t1.FullName == t2.FullName). +// This can occur if a descriptor type is created dynamically, or multiple +// versions of the same proto type are accidentally linked into the Go binary. +type Descriptor interface { + // ParentFile returns the parent file descriptor that this descriptor + // is declared within. The parent file for the file descriptor is itself. + // + // Support for this functionality is optional and may return nil. + ParentFile() FileDescriptor + + // Parent returns the parent containing this descriptor declaration. 
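//
// Illustrative sketch: walking from a hypothetical FieldDescriptor fd up to
// its file; the loop terminates because a FileDescriptor has a nil Parent.
//
//     var d protoreflect.Descriptor = fd
//     for d != nil {
//         fmt.Println(d.FullName())
//         d = d.Parent()
//     }
//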
+ // The following shows the mapping from child type to possible parent types: + // + // ╔═════════════════════╤═══════════════════════════════════╗ + // ║ Child type │ Possible parent types ║ + // ╠═════════════════════╪═══════════════════════════════════╣ + // ║ FileDescriptor │ nil ║ + // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ OneofDescriptor │ MessageDescriptor ║ + // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ EnumValueDescriptor │ EnumDescriptor ║ + // ║ ServiceDescriptor │ FileDescriptor ║ + // ║ MethodDescriptor │ ServiceDescriptor ║ + // ╚═════════════════════╧═══════════════════════════════════╝ + // + // Support for this functionality is optional and may return nil. + Parent() Descriptor + + // Index returns the index of this descriptor within its parent. + // It returns 0 if the descriptor does not have a parent or if the parent + // is unknown. + Index() int + + // Syntax is the protobuf syntax. + Syntax() Syntax // e.g., Proto2 or Proto3 + + // Name is the short name of the declaration (i.e., FullName.Name). + Name() Name // e.g., "Any" + + // FullName is the fully-qualified name of the declaration. + // + // The FullName is a concatenation of the full name of the type that this + // type is declared within and the declaration name. For example, + // field "foo_field" in message "proto.package.MyMessage" is + // uniquely identified as "proto.package.MyMessage.foo_field". + // Enum values are an exception to the rule (see EnumValueDescriptor). + FullName() FullName // e.g., "google.protobuf.Any" + + // IsPlaceholder reports whether type information is missing since a + // dependency is not resolved, in which case only name information is known. + // + // Placeholder types may only be returned by the following accessors + // as a result of unresolved dependencies or weak imports: + // + // ╔═══════════════════════════════════╤═════════════════════╗ + // ║ Accessor │ Descriptor ║ + // ╠═══════════════════════════════════╪═════════════════════╣ + // ║ FileImports.FileDescriptor │ FileDescriptor ║ + // ║ FieldDescriptor.Enum │ EnumDescriptor ║ + // ║ FieldDescriptor.Message │ MessageDescriptor ║ + // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ + // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ + // ║ MethodDescriptor.Input │ MessageDescriptor ║ + // ║ MethodDescriptor.Output │ MessageDescriptor ║ + // ╚═══════════════════════════════════╧═════════════════════╝ + // + // If true, only Name and FullName are valid. + // For FileDescriptor, the Path is also valid. + IsPlaceholder() bool + + // Options returns the descriptor options. The caller must not modify + // the returned value. + // + // To avoid a dependency cycle, this function returns a proto.Message value. 
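//
// Illustrative sketch (assuming the "google.golang.org/protobuf/types/known/durationpb"
// and "google.golang.org/protobuf/types/descriptorpb" imports); the concrete
// options type for each descriptor follows the table below:
//
//     md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
//     opts := md.Options().(*descriptorpb.MessageOptions) // typed nil if no options are set
//     _ = opts
//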
+ // The proto message type returned for each descriptor type is as follows: + // ╔═════════════════════╤══════════════════════════════════════════╗ + // ║ Go type │ Protobuf message type ║ + // ╠═════════════════════╪══════════════════════════════════════════╣ + // ║ FileDescriptor │ google.protobuf.FileOptions ║ + // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ + // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ + // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ + // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ + // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ + // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ + // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ + // ╚═════════════════════╧══════════════════════════════════════════╝ + // + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + Options() ProtoMessage + + doNotImplement +} + +// FileDescriptor describes the types in a complete proto file and +// corresponds with the google.protobuf.FileDescriptorProto message. +// +// Top-level declarations: +// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +type FileDescriptor interface { + Descriptor // Descriptor.FullName is identical to Package + + // Path returns the file name, relative to the source tree root. + Path() string // e.g., "path/to/file.proto" + // Package returns the protobuf package namespace. + Package() FullName // e.g., "google.protobuf" + + // Imports is a list of imported proto files. + Imports() FileImports + + // Enums is a list of the top-level enum declarations. + Enums() EnumDescriptors + // Messages is a list of the top-level message declarations. + Messages() MessageDescriptors + // Extensions is a list of the top-level extension declarations. + Extensions() ExtensionDescriptors + // Services is a list of the top-level service declarations. + Services() ServiceDescriptors + + // SourceLocations is a list of source locations. + SourceLocations() SourceLocations + + isFileDescriptor +} +type isFileDescriptor interface{ ProtoType(FileDescriptor) } + +// FileImports is a list of file imports. +type FileImports interface { + // Len reports the number of files imported by this proto file. + Len() int + // Get returns the ith FileImport. It panics if out of bounds. + Get(i int) FileImport + + doNotImplement +} + +// FileImport is the declaration for a proto file import. +type FileImport struct { + // FileDescriptor is the file type for the given import. + // It is a placeholder descriptor if IsWeak is set or if a dependency has + // not been regenerated to implement the new reflection APIs. + FileDescriptor + + // IsPublic reports whether this is a public import, which causes this file + // to alias declarations within the imported file. The intended use cases + // for this feature is the ability to move proto files without breaking + // existing dependencies. + // + // The current file and the imported file must be within proto package. + IsPublic bool + + // IsWeak reports whether this is a weak import, which does not impose + // a direct dependency on the target file. + // + // Weak imports are a legacy proto1 feature. Equivalent behavior is + // achieved using proto2 extension fields or proto3 Any messages. + IsWeak bool +} + +// MessageDescriptor describes a message and +// corresponds with the google.protobuf.DescriptorProto message. 
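//
// Illustrative sketch of file-level introspection (assuming the
// "google.golang.org/protobuf/types/known/durationpb" import):
//
//     fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()
//     fd.Path()    // "google/protobuf/duration.proto"
//     fd.Package() // "google.protobuf"
//     for i := 0; i < fd.Messages().Len(); i++ {
//         fmt.Println(fd.Messages().Get(i).FullName())
//     }
//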
+// +// Nested declarations: +// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, +// and/or MessageDescriptor. +type MessageDescriptor interface { + Descriptor + + // IsMapEntry indicates that this is an auto-generated message type to + // represent the entry type for a map field. + // + // Map entry messages have only two fields: + // • a "key" field with a field number of 1 + // • a "value" field with a field number of 2 + // The key and value types are determined by these two fields. + // + // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true + // for some field with this message type. + IsMapEntry() bool + + // Fields is a list of nested field declarations. + Fields() FieldDescriptors + // Oneofs is a list of nested oneof declarations. + Oneofs() OneofDescriptors + + // ReservedNames is a list of reserved field names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of field numbers. + ReservedRanges() FieldRanges + // RequiredNumbers is a list of required field numbers. + // In Proto3, it is always an empty list. + RequiredNumbers() FieldNumbers + // ExtensionRanges is the field ranges used for extension fields. + // In Proto3, it is always an empty ranges. + ExtensionRanges() FieldRanges + // ExtensionRangeOptions returns the ith extension range options. + // + // To avoid a dependency cycle, this method returns a proto.Message value, + // which always contains a google.protobuf.ExtensionRangeOptions message. + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + ExtensionRangeOptions(i int) ProtoMessage + + // Enums is a list of nested enum declarations. + Enums() EnumDescriptors + // Messages is a list of nested message declarations. + Messages() MessageDescriptors + // Extensions is a list of nested extension declarations. + Extensions() ExtensionDescriptors + + isMessageDescriptor +} +type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } + +// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +type MessageType interface { + // New returns a newly allocated empty message. + New() Message + + // Zero returns an empty, read-only message. + Zero() Message + + // Descriptor returns the message descriptor. + // + // Invariant: t.Descriptor() == t.New().Descriptor() + Descriptor() MessageDescriptor +} + +// MessageDescriptors is a list of message declarations. +type MessageDescriptors interface { + // Len reports the number of messages. + Len() int + // Get returns the ith MessageDescriptor. It panics if out of bounds. + Get(i int) MessageDescriptor + // ByName returns the MessageDescriptor for a message named s. + // It returns nil if not found. + ByName(s Name) MessageDescriptor + + doNotImplement +} + +// FieldDescriptor describes a field within a message and +// corresponds with the google.protobuf.FieldDescriptorProto message. +// +// It is used for both normal fields defined within the parent message +// (e.g., MessageDescriptor.Fields) and fields that extend some remote message +// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +type FieldDescriptor interface { + Descriptor + + // Number reports the unique number for this field. + Number() FieldNumber + // Cardinality reports the cardinality for this field. + Cardinality() Cardinality + // Kind reports the basic kind for this field. + Kind() Kind + + // HasJSONName reports whether this field has an explicitly set JSON name. 
+ HasJSONName() bool + + // JSONName reports the name used for JSON serialization. + // It is usually the camel-cased form of the field name. + JSONName() string + + // HasPresence reports whether the field distinguishes between unpopulated + // and default values. + HasPresence() bool + + // IsExtension reports whether this is an extension field. If false, + // then Parent and ContainingMessage refer to the same message. + // Otherwise, ContainingMessage and Parent likely differ. + IsExtension() bool + + // HasOptionalKeyword reports whether the "optional" keyword was explicitly + // specified in the source .proto file. + HasOptionalKeyword() bool + + // IsWeak reports whether this is a weak field, which does not impose a + // direct dependency on the target type. + // If true, then Message returns a placeholder type. + IsWeak() bool + + // IsPacked reports whether repeated primitive numeric kinds should be + // serialized using a packed encoding. + // If true, then it implies Cardinality is Repeated. + IsPacked() bool + + // IsList reports whether this field represents a list, + // where the value type for the associated field is a List. + // It is equivalent to checking whether Cardinality is Repeated and + // that IsMap reports false. + IsList() bool + + // IsMap reports whether this field represents a map, + // where the value type for the associated field is a Map. + // It is equivalent to checking whether Cardinality is Repeated, + // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + IsMap() bool + + // MapKey returns the field descriptor for the key in the map entry. + // It returns nil if IsMap reports false. + MapKey() FieldDescriptor + + // MapValue returns the field descriptor for the value in the map entry. + // It returns nil if IsMap reports false. + MapValue() FieldDescriptor + + // HasDefault reports whether this field has a default value. + HasDefault() bool + + // Default returns the default value for scalar fields. + // For proto2, it is the default value as specified in the proto file, + // or the zero value if unspecified. + // For proto3, it is always the zero value of the scalar. + // The Value type is determined by the Kind. + Default() Value + + // DefaultEnumValue returns the enum value descriptor for the default value + // of an enum field, and is nil for any other kind of field. + DefaultEnumValue() EnumValueDescriptor + + // ContainingOneof is the containing oneof that this field belongs to, + // and is nil if this field is not part of a oneof. + ContainingOneof() OneofDescriptor + + // ContainingMessage is the containing message that this field belongs to. + // For extension fields, this may not necessarily be the parent message + // that the field is declared within. + ContainingMessage() MessageDescriptor + + // Enum is the enum descriptor if Kind is EnumKind. + // It returns nil for any other Kind. + Enum() EnumDescriptor + + // Message is the message descriptor if Kind is + // MessageKind or GroupKind. It returns nil for any other Kind. + Message() MessageDescriptor + + isFieldDescriptor +} +type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } + +// FieldDescriptors is a list of field declarations. +type FieldDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith FieldDescriptor. It panics if out of bounds. + Get(i int) FieldDescriptor + // ByName returns the FieldDescriptor for a field named s. + // It returns nil if not found. 
+ ByName(s Name) FieldDescriptor + // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. + // It returns nil if not found. + ByJSONName(s string) FieldDescriptor + // ByNumber returns the FieldDescriptor for a field numbered n. + // It returns nil if not found. + ByNumber(n FieldNumber) FieldDescriptor + + doNotImplement +} + +// OneofDescriptor describes a oneof field set within a given message and +// corresponds with the google.protobuf.OneofDescriptorProto message. +type OneofDescriptor interface { + Descriptor + + // IsSynthetic reports whether this is a synthetic oneof created to support + // proto3 optional semantics. If true, Fields contains exactly one field + // with HasOptionalKeyword specified. + IsSynthetic() bool + + // Fields is a list of fields belonging to this oneof. + Fields() FieldDescriptors + + isOneofDescriptor +} +type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } + +// OneofDescriptors is a list of oneof declarations. +type OneofDescriptors interface { + // Len reports the number of oneof fields. + Len() int + // Get returns the ith OneofDescriptor. It panics if out of bounds. + Get(i int) OneofDescriptor + // ByName returns the OneofDescriptor for a oneof named s. + // It returns nil if not found. + ByName(s Name) OneofDescriptor + + doNotImplement +} + +// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +type ExtensionDescriptor = FieldDescriptor + +// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +type ExtensionTypeDescriptor interface { + ExtensionDescriptor + + // Type returns the associated ExtensionType. + Type() ExtensionType + + // Descriptor returns the plain ExtensionDescriptor without the + // associated ExtensionType. + Descriptor() ExtensionDescriptor +} + +// ExtensionDescriptors is a list of field declarations. +type ExtensionDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith ExtensionDescriptor. It panics if out of bounds. + Get(i int) ExtensionDescriptor + // ByName returns the ExtensionDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) ExtensionDescriptor + + doNotImplement +} + +// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// Go implementation. The nested field descriptor must be for a extension field. +// +// While a normal field is a member of the parent message that it is declared +// within (see Descriptor.Parent), an extension field is a member of some other +// target message (see ExtensionDescriptor.Extendee) and may have no +// relationship with the parent. However, the full name of an extension field is +// relative to the parent that it is declared within. +// +// For example: +// syntax = "proto2"; +// package example; +// message FooMessage { +// extensions 100 to max; +// } +// message BarMessage { +// extends FooMessage { optional BarMessage bar_field = 100; } +// } +// +// Field "bar_field" is an extension of FooMessage, but its full name is +// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". +type ExtensionType interface { + // New returns a new value for the field. + // For scalars, this returns the default value in native Go form. + New() Value + + // Zero returns a new value for the field. + // For scalars, this returns the default value in native Go form. + // For composite types, this returns an empty, read-only message, list, or map. 
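//
// Illustrative sketch of field introspection (assuming the
// "google.golang.org/protobuf/types/known/structpb" import; Struct.fields is
// a map<string, google.protobuf.Value> field):
//
//     md := (&structpb.Struct{}).ProtoReflect().Descriptor()
//     f := md.Fields().ByName("fields")
//     f.IsMap()                         // true
//     f.MapKey().Kind()                 // StringKind
//     f.MapValue().Kind()               // MessageKind
//     f.MapValue().Message().FullName() // "google.protobuf.Value"
//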
+ Zero() Value + + // TypeDescriptor returns the extension type descriptor. + TypeDescriptor() ExtensionTypeDescriptor + + // ValueOf wraps the input and returns it as a Value. + // ValueOf panics if the input value is invalid or not the appropriate type. + // + // ValueOf is more extensive than protoreflect.ValueOf for a given field's + // value as it has more type information available. + ValueOf(interface{}) Value + + // InterfaceOf completely unwraps the Value to the underlying Go type. + // InterfaceOf panics if the input is nil or does not represent the + // appropriate underlying Go type. For composite types, it panics if the + // value is not mutable. + // + // InterfaceOf is able to unwrap the Value further than Value.Interface + // as it has more type information available. + InterfaceOf(Value) interface{} + + // IsValidValue reports whether the Value is valid to assign to the field. + IsValidValue(Value) bool + + // IsValidInterface reports whether the input is valid to assign to the field. + IsValidInterface(interface{}) bool +} + +// EnumDescriptor describes an enum and +// corresponds with the google.protobuf.EnumDescriptorProto message. +// +// Nested declarations: +// EnumValueDescriptor. +type EnumDescriptor interface { + Descriptor + + // Values is a list of nested enum value declarations. + Values() EnumValueDescriptors + + // ReservedNames is a list of reserved enum names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of enum numbers. + ReservedRanges() EnumRanges + + isEnumDescriptor +} +type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } + +// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +type EnumType interface { + // New returns an instance of this enum type with its value set to n. + New(n EnumNumber) Enum + + // Descriptor returns the enum descriptor. + // + // Invariant: t.Descriptor() == t.New(0).Descriptor() + Descriptor() EnumDescriptor +} + +// EnumDescriptors is a list of enum declarations. +type EnumDescriptors interface { + // Len reports the number of enum types. + Len() int + // Get returns the ith EnumDescriptor. It panics if out of bounds. + Get(i int) EnumDescriptor + // ByName returns the EnumDescriptor for an enum named s. + // It returns nil if not found. + ByName(s Name) EnumDescriptor + + doNotImplement +} + +// EnumValueDescriptor describes an enum value and +// corresponds with the google.protobuf.EnumValueDescriptorProto message. +// +// All other proto declarations are in the namespace of the parent. +// However, enum values do not follow this rule and are within the namespace +// of the parent's parent (i.e., they are a sibling of the containing enum). +// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified +// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". +type EnumValueDescriptor interface { + Descriptor + + // Number returns the enum value as an integer. + Number() EnumNumber + + isEnumValueDescriptor +} +type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } + +// EnumValueDescriptors is a list of enum value declarations. +type EnumValueDescriptors interface { + // Len reports the number of enum values. + Len() int + // Get returns the ith EnumValueDescriptor. It panics if out of bounds. + Get(i int) EnumValueDescriptor + // ByName returns the EnumValueDescriptor for the enum value named s. + // It returns nil if not found. 
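//
// Illustrative sketch of enum introspection (assuming the
// "google.golang.org/protobuf/types/known/structpb" import):
//
//     ed := structpb.NullValue(0).Descriptor() // google.protobuf.NullValue
//     ed.Values().ByNumber(0).Name()           // "NULL_VALUE"
//     ed.Values().Get(0).FullName()            // "google.protobuf.NULL_VALUE" (sibling of the enum)
//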
+ ByName(s Name) EnumValueDescriptor + // ByNumber returns the EnumValueDescriptor for the enum value numbered n. + // If multiple have the same number, the first one defined is returned + // It returns nil if not found. + ByNumber(n EnumNumber) EnumValueDescriptor + + doNotImplement +} + +// ServiceDescriptor describes a service and +// corresponds with the google.protobuf.ServiceDescriptorProto message. +// +// Nested declarations: MethodDescriptor. +type ServiceDescriptor interface { + Descriptor + + // Methods is a list of nested message declarations. + Methods() MethodDescriptors + + isServiceDescriptor +} +type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } + +// ServiceDescriptors is a list of service declarations. +type ServiceDescriptors interface { + // Len reports the number of services. + Len() int + // Get returns the ith ServiceDescriptor. It panics if out of bounds. + Get(i int) ServiceDescriptor + // ByName returns the ServiceDescriptor for a service named s. + // It returns nil if not found. + ByName(s Name) ServiceDescriptor + + doNotImplement +} + +// MethodDescriptor describes a method and +// corresponds with the google.protobuf.MethodDescriptorProto message. +type MethodDescriptor interface { + Descriptor + + // Input is the input message descriptor. + Input() MessageDescriptor + // Output is the output message descriptor. + Output() MessageDescriptor + // IsStreamingClient reports whether the client streams multiple messages. + IsStreamingClient() bool + // IsStreamingServer reports whether the server streams multiple messages. + IsStreamingServer() bool + + isMethodDescriptor +} +type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } + +// MethodDescriptors is a list of method declarations. +type MethodDescriptors interface { + // Len reports the number of methods. + Len() int + // Get returns the ith MethodDescriptor. It panics if out of bounds. + Get(i int) MethodDescriptor + // ByName returns the MethodDescriptor for a service method named s. + // It returns nil if not found. + ByName(s Name) MethodDescriptor + + doNotImplement +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go new file mode 100644 index 00000000..f3198107 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -0,0 +1,285 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import "google.golang.org/protobuf/encoding/protowire" + +// Enum is a reflection interface for a concrete enum value, +// which provides type information and a getter for the enum number. +// Enum does not provide a mutable API since enums are commonly backed by +// Go constants, which are not addressable. +type Enum interface { + // Descriptor returns enum descriptor, which contains only the protobuf + // type information for the enum. + Descriptor() EnumDescriptor + + // Type returns the enum type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the enum descriptor be used instead. + Type() EnumType + + // Number returns the enum value as an integer. + Number() EnumNumber +} + +// Message is a reflective interface for a concrete message value, +// encapsulating both type and value information for the message. 
+// +// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// For non-extension fields, the descriptor must exactly match the +// field known by the parent message. +// For extension fields, the descriptor must implement ExtensionTypeDescriptor, +// extend the parent message (i.e., have the same message FullName), and +// be within the parent's extension range. +// +// Each field Value can be a scalar or a composite type (Message, List, or Map). +// See Value for the Go types associated with a FieldDescriptor. +// Providing a Value that is invalid or of an incorrect type panics. +type Message interface { + // Descriptor returns message descriptor, which contains only the protobuf + // type information for the message. + Descriptor() MessageDescriptor + + // Type returns the message type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the message descriptor be used instead. + Type() MessageType + + // New returns a newly allocated and mutable empty message. + New() Message + + // Interface unwraps the message reflection interface and + // returns the underlying ProtoMessage interface. + Interface() ProtoMessage + + // Range iterates over every populated field in an undefined order, + // calling f for each field descriptor and value encountered. + // Range returns immediately if f returns false. + // While iterating, mutating operations may only be performed + // on the current field descriptor. + Range(f func(FieldDescriptor, Value) bool) + + // Has reports whether a field is populated. + // + // Some fields have the property of nullability where it is possible to + // distinguish between the default value of a field and whether the field + // was explicitly populated with the default value. Singular message fields, + // member fields of a oneof, and proto2 scalar fields are nullable. Such + // fields are populated only if explicitly set. + // + // In other cases (aside from the nullable cases above), + // a proto3 scalar field is populated if it contains a non-zero value, and + // a repeated field is populated if it is non-empty. + Has(FieldDescriptor) bool + + // Clear clears the field such that a subsequent Has call reports false. + // + // Clearing an extension field clears both the extension type and value + // associated with the given field number. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(FieldDescriptor) + + // Get retrieves the value for a field. + // + // For unpopulated scalars, it returns the default value, where + // the default value of a bytes scalar is guaranteed to be a copy. + // For unpopulated composite types, it returns an empty, read-only view + // of the value; to obtain a mutable reference, use Mutable. + Get(FieldDescriptor) Value + + // Set stores the value for a field. + // + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType. + // When setting a composite type, it is unspecified whether the stored value + // aliases the source's memory in any way. If the composite value is an + // empty, read-only value, then it panics. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(FieldDescriptor, Value) + + // Mutable returns a mutable reference to a composite type. + // + // If the field is unpopulated, it may allocate a composite value. 
+ // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType + // if not already stored. + // It panics if the field does not contain a composite type. + // + // Mutable is a mutating operation and unsafe for concurrent use. + Mutable(FieldDescriptor) Value + + // NewField returns a new value that is assignable to the field + // for the given descriptor. For scalars, this returns the default value. + // For lists, maps, and messages, this returns a new, empty, mutable value. + NewField(FieldDescriptor) Value + + // WhichOneof reports which field within the oneof is populated, + // returning nil if none are populated. + // It panics if the oneof descriptor does not belong to this message. + WhichOneof(OneofDescriptor) FieldDescriptor + + // GetUnknown retrieves the entire list of unknown fields. + // The caller may only mutate the contents of the RawFields + // if the mutated bytes are stored back into the message with SetUnknown. + GetUnknown() RawFields + + // SetUnknown stores an entire list of unknown fields. + // The raw fields must be syntactically valid according to the wire format. + // An implementation may panic if this is not the case. + // Once stored, the caller must not mutate the content of the RawFields. + // An empty RawFields may be passed to clear the fields. + // + // SetUnknown is a mutating operation and unsafe for concurrent use. + SetUnknown(RawFields) + + // IsValid reports whether the message is valid. + // + // An invalid message is an empty, read-only value. + // + // An invalid message often corresponds to a nil pointer of the concrete + // message type, but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool + + // ProtoMethods returns optional fast-path implementions of various operations. + // This method may return nil. + // + // The returned methods type is identical to + // "google.golang.org/protobuf/runtime/protoiface".Methods. + // Consult the protoiface package documentation for details. + ProtoMethods() *methods +} + +// RawFields is the raw bytes for an ordered sequence of fields. +// Each field contains both the tag (representing field number and wire type), +// and also the wire data itself. +type RawFields []byte + +// IsValid reports whether b is syntactically correct wire format. +func (b RawFields) IsValid() bool { + for len(b) > 0 { + _, _, n := protowire.ConsumeField(b) + if n < 0 { + return false + } + b = b[n:] + } + return true +} + +// List is a zero-indexed, ordered list. +// The element Value type is determined by FieldDescriptor.Kind. +// Providing a Value that is invalid or of an incorrect type panics. +type List interface { + // Len reports the number of entries in the List. + // Get, Set, and Truncate panic with out of bound indexes. + Len() int + + // Get retrieves the value at the given index. + // It never returns an invalid value. + Get(int) Value + + // Set stores a value for the given index. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(int, Value) + + // Append appends the provided value to the end of the list. 
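//
// Illustrative sketch of message reflection (assuming the
// "google.golang.org/protobuf/types/known/durationpb" import):
//
//     m := (&durationpb.Duration{Seconds: 90}).ProtoReflect()
//     f := m.Descriptor().Fields().ByName("seconds")
//     m.Get(f).Int()                           // 90
//     m.Set(f, protoreflect.ValueOfInt64(120))
//     m.Has(f)                                 // true (non-zero proto3 scalar)
//     m.Range(func(f protoreflect.FieldDescriptor, v protoreflect.Value) bool {
//         fmt.Println(f.Name(), v.Interface())
//         return true
//     })
//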
+ // When appending a composite type, it is unspecified whether the appended + // value aliases the source's memory in any way. + // + // Append is a mutating operation and unsafe for concurrent use. + Append(Value) + + // AppendMutable appends a new, empty, mutable message value to the end + // of the list and returns it. + // It panics if the list does not contain a message type. + AppendMutable() Value + + // Truncate truncates the list to a smaller length. + // + // Truncate is a mutating operation and unsafe for concurrent use. + Truncate(int) + + // NewElement returns a new value for a list element. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewElement() Value + + // IsValid reports whether the list is valid. + // + // An invalid list is an empty, read-only value. + // + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} + +// Map is an unordered, associative map. +// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. +// The entry Value type is determined by FieldDescriptor.MapValue.Kind. +// Providing a MapKey or Value that is invalid or of an incorrect type panics. +type Map interface { + // Len reports the number of elements in the map. + Len() int + + // Range iterates over every map entry in an undefined order, + // calling f for each key and value encountered. + // Range calls f Len times unless f returns false, which stops iteration. + // While iterating, mutating operations may only be performed + // on the current map key. + Range(f func(MapKey, Value) bool) + + // Has reports whether an entry with the given key is in the map. + Has(MapKey) bool + + // Clear clears the entry associated with they given key. + // The operation does nothing if there is no entry associated with the key. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(MapKey) + + // Get retrieves the value for an entry with the given key. + // It returns an invalid value for non-existent entries. + Get(MapKey) Value + + // Set stores the value for an entry with the given key. + // It panics when given a key or value that is invalid or the wrong type. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(MapKey, Value) + + // Mutable retrieves a mutable reference to the entry for the given key. + // If no entry exists for the key, it creates a new, empty, mutable value + // and stores it as the entry for the key. + // It panics if the map value is not a message. + Mutable(MapKey) Value + + // NewValue returns a new value assignable as a map value. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewValue() Value + + // IsValid reports whether the map is valid. + // + // An invalid map is an empty, read-only value. + // + // An invalid message often corresponds to a nil Go map value, + // but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. 
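//
// Illustrative sketch of the List and Map views (assuming the
// "google.golang.org/protobuf/types/known/structpb" import):
//
//     lv := (&structpb.ListValue{}).ProtoReflect()
//     list := lv.Mutable(lv.Descriptor().Fields().ByName("values")).List()
//     list.Append(protoreflect.ValueOfMessage((&structpb.Value{}).ProtoReflect()))
//     list.Len() // 1
//
//     st := (&structpb.Struct{}).ProtoReflect()
//     mp := st.Mutable(st.Descriptor().Fields().ByName("fields")).Map()
//     mp.Set(protoreflect.ValueOfString("k").MapKey(),
//         protoreflect.ValueOfMessage((&structpb.Value{}).ProtoReflect()))
//     mp.Len() // 1
//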
+ IsValid() bool +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go new file mode 100644 index 00000000..918e685e --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package protoreflect + +import "google.golang.org/protobuf/internal/pragma" + +type valueType int + +const ( + nilType valueType = iota + boolType + int32Type + int64Type + uint32Type + uint64Type + float32Type + float64Type + stringType + bytesType + enumType + ifaceType +) + +// value is a union where only one type can be represented at a time. +// This uses a distinct field for each type. This is type safe in Go, but +// occupies more memory than necessary (72B). +type value struct { + pragma.DoNotCompare // 0B + + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface interface{} // 16B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, str: v} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, bin: v} +} +func valueOfIface(v interface{}) Value { + return Value{typ: ifaceType, iface: v} +} + +func (v Value) getString() string { + return v.str +} +func (v Value) getBytes() []byte { + return v.bin +} +func (v Value) getIface() interface{} { + return v.iface +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go new file mode 100644 index 00000000..5a341472 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -0,0 +1,411 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "fmt" + "math" +) + +// Value is a union where only one Go type may be set at a time. +// The Value is used to represent all possible values a field may take. +// The following shows which Go type is used to represent each proto Kind: +// +// ╔════════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠════════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ float32 │ FloatKind ║ +// ║ float64 │ DoubleKind ║ +// ║ string │ StringKind ║ +// ║ []byte │ BytesKind ║ +// ║ EnumNumber │ EnumKind ║ +// ║ Message │ MessageKind, GroupKind ║ +// ╚════════════╧═════════════════════════════════════╝ +// +// Multiple protobuf Kinds may be represented by a single Go type if the type +// can losslessly represent the information for the proto kind. For example, +// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// but use different integer encoding methods. +// +// The List or Map types are used if the field cardinality is repeated. +// A field is a List if FieldDescriptor.IsList reports true. +// A field is a Map if FieldDescriptor.IsMap reports true. +// +// Converting to/from a Value and a concrete Go value panics on type mismatch. 
+// For example, ValueOf("hello").Int() panics because this attempts to +// retrieve an int64 from a string. +type Value value + +// The protoreflect API uses a custom Value union type instead of interface{} +// to keep the future open for performance optimizations. Using an interface{} +// always incurs an allocation for primitives (e.g., int64) since it needs to +// be boxed on the heap (as interfaces can only contain pointers natively). +// Instead, we represent the Value union as a flat struct that internally keeps +// track of which type is set. Using unsafe, the Value union can be reduced +// down to 24B, which is identical in size to a slice. +// +// The latest compiler (Go1.11) currently suffers from some limitations: +// • With inlining, the compiler should be able to statically prove that +// only one of these switch cases are taken and inline one specific case. +// See https://golang.org/issue/22310. + +// ValueOf returns a Value initialized with the concrete value stored in v. +// This panics if the type does not match one of the allowed types in the +// Value union. +func ValueOf(v interface{}) Value { + switch v := v.(type) { + case nil: + return Value{} + case bool: + return ValueOfBool(v) + case int32: + return ValueOfInt32(v) + case int64: + return ValueOfInt64(v) + case uint32: + return ValueOfUint32(v) + case uint64: + return ValueOfUint64(v) + case float32: + return ValueOfFloat32(v) + case float64: + return ValueOfFloat64(v) + case string: + return ValueOfString(v) + case []byte: + return ValueOfBytes(v) + case EnumNumber: + return ValueOfEnum(v) + case Message, List, Map: + return valueOfIface(v) + case ProtoMessage: + panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v)) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +// ValueOfBool returns a new boolean value. +func ValueOfBool(v bool) Value { + if v { + return Value{typ: boolType, num: 1} + } else { + return Value{typ: boolType, num: 0} + } +} + +// ValueOfInt32 returns a new int32 value. +func ValueOfInt32(v int32) Value { + return Value{typ: int32Type, num: uint64(v)} +} + +// ValueOfInt64 returns a new int64 value. +func ValueOfInt64(v int64) Value { + return Value{typ: int64Type, num: uint64(v)} +} + +// ValueOfUint32 returns a new uint32 value. +func ValueOfUint32(v uint32) Value { + return Value{typ: uint32Type, num: uint64(v)} +} + +// ValueOfUint64 returns a new uint64 value. +func ValueOfUint64(v uint64) Value { + return Value{typ: uint64Type, num: v} +} + +// ValueOfFloat32 returns a new float32 value. +func ValueOfFloat32(v float32) Value { + return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfFloat64 returns a new float64 value. +func ValueOfFloat64(v float64) Value { + return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfString returns a new string value. +func ValueOfString(v string) Value { + return valueOfString(v) +} + +// ValueOfBytes returns a new bytes value. +func ValueOfBytes(v []byte) Value { + return valueOfBytes(v[:len(v):len(v)]) +} + +// ValueOfEnum returns a new enum value. +func ValueOfEnum(v EnumNumber) Value { + return Value{typ: enumType, num: uint64(v)} +} + +// ValueOfMessage returns a new Message value. +func ValueOfMessage(v Message) Value { + return valueOfIface(v) +} + +// ValueOfList returns a new List value. +func ValueOfList(v List) Value { + return valueOfIface(v) +} + +// ValueOfMap returns a new Map value. 
+func ValueOfMap(v Map) Value { + return valueOfIface(v) +} + +// IsValid reports whether v is populated with a value. +func (v Value) IsValid() bool { + return v.typ != nilType +} + +// Interface returns v as an interface{}. +// +// Invariant: v == ValueOf(v).Interface() +func (v Value) Interface() interface{} { + switch v.typ { + case nilType: + return nil + case boolType: + return v.Bool() + case int32Type: + return int32(v.Int()) + case int64Type: + return int64(v.Int()) + case uint32Type: + return uint32(v.Uint()) + case uint64Type: + return uint64(v.Uint()) + case float32Type: + return float32(v.Float()) + case float64Type: + return float64(v.Float()) + case stringType: + return v.String() + case bytesType: + return v.Bytes() + case enumType: + return v.Enum() + default: + return v.getIface() + } +} + +func (v Value) typeName() string { + switch v.typ { + case nilType: + return "nil" + case boolType: + return "bool" + case int32Type: + return "int32" + case int64Type: + return "int64" + case uint32Type: + return "uint32" + case uint64Type: + return "uint64" + case float32Type: + return "float32" + case float64Type: + return "float64" + case stringType: + return "string" + case bytesType: + return "bytes" + case enumType: + return "enum" + default: + switch v := v.getIface().(type) { + case Message: + return "message" + case List: + return "list" + case Map: + return "map" + default: + return fmt.Sprintf("", v) + } + } +} + +func (v Value) panicMessage(what string) string { + return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what) +} + +// Bool returns v as a bool and panics if the type is not a bool. +func (v Value) Bool() bool { + switch v.typ { + case boolType: + return v.num > 0 + default: + panic(v.panicMessage("bool")) + } +} + +// Int returns v as a int64 and panics if the type is not a int32 or int64. +func (v Value) Int() int64 { + switch v.typ { + case int32Type, int64Type: + return int64(v.num) + default: + panic(v.panicMessage("int")) + } +} + +// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64. +func (v Value) Uint() uint64 { + switch v.typ { + case uint32Type, uint64Type: + return uint64(v.num) + default: + panic(v.panicMessage("uint")) + } +} + +// Float returns v as a float64 and panics if the type is not a float32 or float64. +func (v Value) Float() float64 { + switch v.typ { + case float32Type, float64Type: + return math.Float64frombits(uint64(v.num)) + default: + panic(v.panicMessage("float")) + } +} + +// String returns v as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (v Value) String() string { + switch v.typ { + case stringType: + return v.getString() + default: + return fmt.Sprint(v.Interface()) + } +} + +// Bytes returns v as a []byte and panics if the type is not a []byte. +func (v Value) Bytes() []byte { + switch v.typ { + case bytesType: + return v.getBytes() + default: + panic(v.panicMessage("bytes")) + } +} + +// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +func (v Value) Enum() EnumNumber { + switch v.typ { + case enumType: + return EnumNumber(v.num) + default: + panic(v.panicMessage("enum")) + } +} + +// Message returns v as a Message and panics if the type is not a Message. 
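//
// Illustrative sketch of the Value union:
//
//     v := protoreflect.ValueOfString("hello")
//     v.String()                           // "hello"
//     v.Interface().(string)               // "hello"
//     protoreflect.ValueOf(int64(5)).Int() // 5
//     // protoreflect.ValueOf("hello").Int() panics: type mismatch
//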
+func (v Value) Message() Message { + switch vi := v.getIface().(type) { + case Message: + return vi + default: + panic(v.panicMessage("message")) + } +} + +// List returns v as a List and panics if the type is not a List. +func (v Value) List() List { + switch vi := v.getIface().(type) { + case List: + return vi + default: + panic(v.panicMessage("list")) + } +} + +// Map returns v as a Map and panics if the type is not a Map. +func (v Value) Map() Map { + switch vi := v.getIface().(type) { + case Map: + return vi + default: + panic(v.panicMessage("map")) + } +} + +// MapKey returns v as a MapKey and panics for invalid MapKey types. +func (v Value) MapKey() MapKey { + switch v.typ { + case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: + return MapKey(v) + default: + panic(v.panicMessage("map key")) + } +} + +// MapKey is used to index maps, where the Go type of the MapKey must match +// the specified key Kind (see MessageDescriptor.IsMapEntry). +// The following shows what Go type is used to represent each proto Kind: +// +// ╔═════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ string │ StringKind ║ +// ╚═════════╧═════════════════════════════════════╝ +// +// A MapKey is constructed and accessed through a Value: +// k := ValueOf("hash").MapKey() // convert string to MapKey +// s := k.String() // convert MapKey to string +// +// The MapKey is a strict subset of valid types used in Value; +// converting a Value to a MapKey with an invalid type panics. +type MapKey value + +// IsValid reports whether k is populated with a value. +func (k MapKey) IsValid() bool { + return Value(k).IsValid() +} + +// Interface returns k as an interface{}. +func (k MapKey) Interface() interface{} { + return Value(k).Interface() +} + +// Bool returns k as a bool and panics if the type is not a bool. +func (k MapKey) Bool() bool { + return Value(k).Bool() +} + +// Int returns k as a int64 and panics if the type is not a int32 or int64. +func (k MapKey) Int() int64 { + return Value(k).Int() +} + +// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. +func (k MapKey) Uint() uint64 { + return Value(k).Uint() +} + +// String returns k as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (k MapKey) String() string { + return Value(k).String() +} + +// Value returns k as a Value. +func (k MapKey) Value() Value { + return Value(k) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go new file mode 100644 index 00000000..c45debdc --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !purego,!appengine + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go new file mode 100644 index 00000000..5e5f9671 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -0,0 +1,800 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoregistry provides data structures to register and lookup +// protobuf descriptor types. +// +// The Files registry contains file descriptors and provides the ability +// to iterate over the files or lookup a specific descriptor within the files. +// Files only contains protobuf descriptors and has no understanding of Go +// type information that may be associated with each descriptor. 
+// +// The Types registry contains descriptor types for which there is a known +// Go type associated with that descriptor. It provides the ability to iterate +// over the registered types or lookup a type by name. +package protoregistry + +import ( + "fmt" + "log" + "strings" + "sync" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// ignoreConflict reports whether to ignore a registration conflict +// given the descriptor being registered and the error. +// It is a variable so that the behavior is easily overridden in another file. +var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + log.Printf(""+ + "WARNING: %v\n"+ + "A future release will panic on registration conflicts. See:\n"+ + "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ + "\n", err) + return true +} + +var globalMutex sync.RWMutex + +// GlobalFiles is a global registry of file descriptors. +var GlobalFiles *Files = new(Files) + +// GlobalTypes is the registry used by default for type lookups +// unless a local registry is provided by the user. +var GlobalTypes *Types = new(Types) + +// NotFound is a sentinel error value to indicate that the type was not found. +// +// Since registry lookup can happen in the critical performance path, resolvers +// must return this exact error value, not an error wrapping it. +var NotFound = errors.New("not found") + +// Files is a registry for looking up or iterating over files and the +// descriptors contained within them. +// The Find and Range methods are safe for concurrent use. +type Files struct { + // The map of descsByName contains: + // EnumDescriptor + // EnumValueDescriptor + // MessageDescriptor + // ExtensionDescriptor + // ServiceDescriptor + // *packageDescriptor + // + // Note that files are stored as a slice, since a package may contain + // multiple files. Only top-level declarations are registered. + // Note that enum values are in the top-level since that are in the same + // scope as the parent enum. + descsByName map[protoreflect.FullName]interface{} + filesByPath map[string]protoreflect.FileDescriptor +} + +type packageDescriptor struct { + files []protoreflect.FileDescriptor +} + +// RegisterFile registers the provided file descriptor. +// +// If any descriptor within the file conflicts with the descriptor of any +// previously registered file (e.g., two enums with the same full name), +// then the file is not registered and an error is returned. +// +// It is permitted for multiple files to have the same file path. +func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { + if r == GlobalFiles { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if r.descsByName == nil { + r.descsByName = map[protoreflect.FullName]interface{}{ + "": &packageDescriptor{}, + } + r.filesByPath = make(map[string]protoreflect.FileDescriptor) + } + path := file.Path() + if prev := r.filesByPath[path]; prev != nil { + // TODO: Remove this after some soak-in period after moving these types. 
+ var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + } + if r == GlobalFiles && prevPath != "" { + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) + } + + err := errors.New("file %q is already registered", file.Path()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + switch prev := r.descsByName[name]; prev.(type) { + case nil, *packageDescriptor: + default: + err := errors.New("file %q has a package name conflict over %v", file.Path(), name) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + } + var err error + var hasConflict bool + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + if prev := r.descsByName[d.FullName()]; prev != nil { + hasConflict = true + err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(d, err) { + err = nil + } + } + }) + if hasConflict { + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + if r.descsByName[name] == nil { + r.descsByName[name] = &packageDescriptor{} + } + } + p := r.descsByName[file.Package()].(*packageDescriptor) + p.files = append(p.files, file) + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + r.descsByName[d.FullName()] = d + }) + r.filesByPath[path] = file + return nil +} + +// FindDescriptorByName looks up a descriptor by the full name. +// +// This returns (nil, NotFound) if not found. 
+func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + prefix := name + suffix := nameSuffix("") + for prefix != "" { + if d, ok := r.descsByName[prefix]; ok { + switch d := d.(type) { + case protoreflect.EnumDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.EnumValueDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.MessageDescriptor: + if d.FullName() == name { + return d, nil + } + if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { + return d, nil + } + case protoreflect.ExtensionDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.ServiceDescriptor: + if d.FullName() == name { + return d, nil + } + if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { + return d, nil + } + } + return nil, NotFound + } + prefix = prefix.Parent() + suffix = nameSuffix(name[len(prefix)+len("."):]) + } + return nil, NotFound +} + +func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { + name := suffix.Pop() + if suffix == "" { + if ed := md.Enums().ByName(name); ed != nil { + return ed + } + for i := md.Enums().Len() - 1; i >= 0; i-- { + if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { + return vd + } + } + if xd := md.Extensions().ByName(name); xd != nil { + return xd + } + if fd := md.Fields().ByName(name); fd != nil { + return fd + } + if od := md.Oneofs().ByName(name); od != nil { + return od + } + } + if md := md.Messages().ByName(name); md != nil { + if suffix == "" { + return md + } + return findDescriptorInMessage(md, suffix) + } + return nil +} + +type nameSuffix string + +func (s *nameSuffix) Pop() (name protoreflect.Name) { + if i := strings.IndexByte(string(*s), '.'); i >= 0 { + name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] + } else { + name, *s = protoreflect.Name((*s)), "" + } + return name +} + +// FindFileByPath looks up a file by the path. +// +// This returns (nil, NotFound) if not found. +func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if fd, ok := r.filesByPath[path]; ok { + return fd, nil + } + return nil, NotFound +} + +// NumFiles reports the number of registered files. +func (r *Files) NumFiles() int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.filesByPath) +} + +// RangeFiles iterates over all registered files while f returns true. +// The iteration order is undefined. +func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, file := range r.filesByPath { + if !f(file) { + return + } + } +} + +// NumFilesByPackage reports the number of registered files in a proto package. 
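Taken together, RegisterFile, FindFileByPath, and FindDescriptorByName cover the usual lookup flow. A minimal sketch against the global registry follows; it assumes that importing the generated descriptorpb package registers google/protobuf/descriptor.proto into GlobalFiles via its init code, and the looked-up names are chosen only as examples.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // init registers descriptor.proto
)

func main() {
	// Look a file up by path.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
	if err != nil {
		// Lookups report failure with the exact protoregistry.NotFound value.
		fmt.Println("not registered:", err)
		return
	}
	fmt.Println(fd.Package()) // google.protobuf

	// Look a nested declaration up by its full name.
	if d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.FieldDescriptorProto.Type"); err == nil {
		fmt.Println(d.FullName())
	}

	fmt.Println(protoregistry.GlobalFiles.NumFiles())
}
```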
+func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return 0 + } + return len(p.files) +} + +// RangeFilesByPackage iterates over all registered files in a given proto package +// while f returns true. The iteration order is undefined. +func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return + } + for _, file := range p.files { + if !f(file) { + return + } + } +} + +// rangeTopLevelDescriptors iterates over all top-level descriptors in a file +// which will be directly entered into the registry. +func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { + eds := fd.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + vds := eds.Get(i).Values() + for i := vds.Len() - 1; i >= 0; i-- { + f(vds.Get(i)) + } + } + mds := fd.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + f(mds.Get(i)) + } + xds := fd.Extensions() + for i := xds.Len() - 1; i >= 0; i-- { + f(xds.Get(i)) + } + sds := fd.Services() + for i := sds.Len() - 1; i >= 0; i-- { + f(sds.Get(i)) + } +} + +// MessageTypeResolver is an interface for looking up messages. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type MessageTypeResolver interface { + // FindMessageByName looks up a message by its full name. + // E.g., "google.protobuf.Any" + // + // This return (nil, NotFound) if not found. + FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) + + // FindMessageByURL looks up a message by a URL identifier. + // See documentation on google.protobuf.Any.type_url for the URL format. + // + // This returns (nil, NotFound) if not found. + FindMessageByURL(url string) (protoreflect.MessageType, error) +} + +// ExtensionTypeResolver is an interface for looking up extensions. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type ExtensionTypeResolver interface { + // FindExtensionByName looks up a extension field by the field's full name. + // Note that this is the full name of the field as determined by + // where the extension is declared and is unrelated to the full name of the + // message being extended. + // + // This returns (nil, NotFound) if not found. + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + + // FindExtensionByNumber looks up a extension field by the field number + // within some parent message, identified by full name. + // + // This returns (nil, NotFound) if not found. + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) +} + +var ( + _ MessageTypeResolver = (*Types)(nil) + _ ExtensionTypeResolver = (*Types)(nil) +) + +// Types is a registry for looking up or iterating over descriptor types. +// The Find and Range methods are safe for concurrent use. 
+type Types struct { + typesByName typesByName + extensionsByMessage extensionsByMessage + + numEnums int + numMessages int + numExtensions int +} + +type ( + typesByName map[protoreflect.FullName]interface{} + extensionsByMessage map[protoreflect.FullName]extensionsByNumber + extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType +) + +// RegisterMessage registers the provided message type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + md := mt.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("message", md, mt); err != nil { + return err + } + r.numMessages++ + return nil +} + +// RegisterEnum registers the provided enum type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterEnum(et protoreflect.EnumType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + ed := et.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("enum", ed, et); err != nil { + return err + } + r.numEnums++ + return nil +} + +// RegisterExtension registers the provided extension type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + // + // A known case where this can happen: Fetching the TypeDescriptor for a + // legacy ExtensionDesc can consult the global registry. + xd := xt.TypeDescriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + field := xd.Number() + message := xd.ContainingMessage().FullName() + if prev := r.extensionsByMessage[message][field]; prev != nil { + err := errors.New("extension number %d is already registered on message %v", field, message) + err = amendErrorWithCaller(err, prev, xt) + if !(r == GlobalTypes && ignoreConflict(xd, err)) { + return err + } + } + + if err := r.register("extension", xd, xt); err != nil { + return err + } + if r.extensionsByMessage == nil { + r.extensionsByMessage = make(extensionsByMessage) + } + if r.extensionsByMessage[message] == nil { + r.extensionsByMessage[message] = make(extensionsByNumber) + } + r.extensionsByMessage[message][field] = xt + r.numExtensions++ + return nil +} + +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { + name := desc.FullName() + prev := r.typesByName[name] + if prev != nil { + err := errors.New("%v %v is already registered", kind, name) + err = amendErrorWithCaller(err, prev, typ) + if !(r == GlobalTypes && ignoreConflict(desc, err)) { + return err + } + } + if r.typesByName == nil { + r.typesByName = make(typesByName) + } + r.typesByName[name] = typ + return nil +} + +// FindEnumByName looks up an enum by its full name. +// E.g., "google.protobuf.Field.Kind". +// +// This returns (nil, NotFound) if not found. 
+func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[enum]; v != nil { + if et, _ := v.(protoreflect.EnumType); et != nil { + return et, nil + } + return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByName looks up a message by its full name. +// E.g., "google.protobuf.Any" +// +// This return (nil, NotFound) if not found. +func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + // The full name by itself is a valid URL. + return r.FindMessageByURL(string(message)) +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByName looks up a extension field by the field's full name. +// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + return xt, nil + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByNumber looks up a extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if xt, ok := r.extensionsByMessage[message][field]; ok { + return xt, nil + } + return nil, NotFound +} + +// NumEnums reports the number of registered enums. +func (r *Types) NumEnums() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numEnums +} + +// RangeEnums iterates over all registered enums while f returns true. +// Iteration order is undefined. +func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if et, ok := typ.(protoreflect.EnumType); ok { + if !f(et) { + return + } + } + } +} + +// NumMessages reports the number of registered messages. 
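The Types registry is queried the same way. The sketch below assumes, as above, that the generated descriptorpb package has registered its message types into GlobalTypes; the type URL prefix is the conventional one from google.protobuf.Any and is otherwise arbitrary.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // init registers these types
)

func main() {
	// By full name (a full name is itself a valid URL).
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.FileDescriptorProto")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(mt.Descriptor().FullName())

	// By Any-style type URL; only the last path element is significant.
	if mt, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.FileDescriptorProto"); err == nil {
		fmt.Println(mt.Descriptor().Fields().Len())
	}

	// Extension lookups key off the containing message and field number;
	// comparing against the exact NotFound value is the documented contract.
	if _, err := protoregistry.GlobalTypes.FindExtensionByNumber("google.protobuf.FileOptions", 1000); err == protoregistry.NotFound {
		fmt.Println("no extension registered for that number")
	}
}
```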
+func (r *Types) NumMessages() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numMessages +} + +// RangeMessages iterates over all registered messages while f returns true. +// Iteration order is undefined. +func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if mt, ok := typ.(protoreflect.MessageType); ok { + if !f(mt) { + return + } + } + } +} + +// NumExtensions reports the number of registered extensions. +func (r *Types) NumExtensions() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numExtensions +} + +// RangeExtensions iterates over all registered extensions while f returns true. +// Iteration order is undefined. +func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if xt, ok := typ.(protoreflect.ExtensionType); ok { + if !f(xt) { + return + } + } + } +} + +// NumExtensionsByMessage reports the number of registered extensions for +// a given message type. +func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.extensionsByMessage[message]) +} + +// RangeExtensionsByMessage iterates over all registered extensions filtered +// by a given message type while f returns true. Iteration order is undefined. +func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, xt := range r.extensionsByMessage[message] { + if !f(xt) { + return + } + } +} + +func typeName(t interface{}) string { + switch t.(type) { + case protoreflect.EnumType: + return "enum" + case protoreflect.MessageType: + return "message" + case protoreflect.ExtensionType: + return "extension" + default: + return fmt.Sprintf("%T", t) + } +} + +func amendErrorWithCaller(err error, prev, curr interface{}) error { + prevPkg := goPackage(prev) + currPkg := goPackage(curr) + if prevPkg == "" || currPkg == "" || prevPkg == currPkg { + return err + } + return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) +} + +func goPackage(v interface{}) string { + switch d := v.(type) { + case protoreflect.EnumType: + v = d.Descriptor() + case protoreflect.MessageType: + v = d.Descriptor() + case protoreflect.ExtensionType: + v = d.TypeDescriptor() + } + if d, ok := v.(protoreflect.Descriptor); ok { + v = d.ParentFile() + } + if d, ok := v.(interface{ GoPackagePath() string }); ok { + return d.GoPackagePath() + } + return "" +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go new file mode 100644 index 00000000..c5872767 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoiface + +type MessageV1 interface { + Reset() + String() string + ProtoMessage() +} + +type ExtensionRangeV1 struct { + Start, End int32 // both inclusive +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go new file mode 100644 index 00000000..32c04f67 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoiface contains types referenced or implemented by messages. +// +// WARNING: This package should only be imported by message implementations. +// The functionality found in this package should be accessed through +// higher-level abstractions provided by the proto package. +package protoiface + +import ( + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Methods is a set of optional fast-path implementations of various operations. +type Methods = struct { + pragma.NoUnkeyedLiterals + + // Flags indicate support for optional features. + Flags SupportFlags + + // Size returns the size in bytes of the wire-format encoding of a message. + // Marshal must be provided if a custom Size is provided. + Size func(SizeInput) SizeOutput + + // Marshal formats a message in the wire-format encoding to the provided buffer. + // Size should be provided if a custom Marshal is provided. + // It must not return an error for a partial message. + Marshal func(MarshalInput) (MarshalOutput, error) + + // Unmarshal parses the wire-format encoding and merges the result into a message. + // It must not reset the target message or return an error for a partial message. + Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) + + // Merge merges the contents of a source message into a destination message. + Merge func(MergeInput) MergeOutput + + // CheckInitialized returns an error if any required fields in the message are not set. + CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) +} + +// SupportFlags indicate support for optional features. +type SupportFlags = uint64 + +const ( + // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. + SupportMarshalDeterministic SupportFlags = 1 << iota + + // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. + SupportUnmarshalDiscardUnknown +) + +// SizeInput is input to the Size method. +type SizeInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Flags MarshalInputFlags +} + +// SizeOutput is output from the Size method. +type SizeOutput = struct { + pragma.NoUnkeyedLiterals + + Size int +} + +// MarshalInput is input to the Marshal method. +type MarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // output is appended to this buffer + Flags MarshalInputFlags +} + +// MarshalOutput is output from the Marshal method. +type MarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Buf []byte // contains marshaled message +} + +// MarshalInputFlags configure the marshaler. +// Most flags correspond to fields in proto.MarshalOptions. +type MarshalInputFlags = uint8 + +const ( + MarshalDeterministic MarshalInputFlags = 1 << iota + MarshalUseCachedSize +) + +// UnmarshalInput is input to the Unmarshal method. 
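As the package comment above notes, these fast-path Methods are consulted by the proto package rather than invoked directly, and the input flags mirror fields on proto.MarshalOptions and proto.UnmarshalOptions. A hedged sketch of the caller-side view, using descriptorpb.FileDescriptorProto only as a convenient concrete message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := &descriptorpb.FileDescriptorProto{Name: proto.String("example.proto")}

	// Deterministic corresponds to the MarshalDeterministic input flag; the
	// proto package only takes a message's fast path when its Methods flags
	// advertise SupportMarshalDeterministic, and otherwise falls back to
	// the reflection-based marshaler.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b))

	// DiscardUnknown corresponds to the UnmarshalDiscardUnknown input flag.
	var out descriptorpb.FileDescriptorProto
	if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName()) // example.proto
}
```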
+type UnmarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // input buffer + Flags UnmarshalInputFlags + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// UnmarshalOutput is output from the Unmarshal method. +type UnmarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Flags UnmarshalOutputFlags +} + +// UnmarshalInputFlags configure the unmarshaler. +// Most flags correspond to fields in proto.UnmarshalOptions. +type UnmarshalInputFlags = uint8 + +const ( + UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota +) + +// UnmarshalOutputFlags are output from the Unmarshal method. +type UnmarshalOutputFlags = uint8 + +const ( + // UnmarshalInitialized may be set on return if all required fields are known to be set. + // If unset, then it does not necessarily indicate that the message is uninitialized, + // only that its status could not be confirmed. + UnmarshalInitialized UnmarshalOutputFlags = 1 << iota +) + +// MergeInput is input to the Merge method. +type MergeInput = struct { + pragma.NoUnkeyedLiterals + + Source protoreflect.Message + Destination protoreflect.Message +} + +// MergeOutput is output from the Merge method. +type MergeOutput = struct { + pragma.NoUnkeyedLiterals + + Flags MergeOutputFlags +} + +// MergeOutputFlags are output from the Merge method. +type MergeOutputFlags = uint8 + +const ( + // MergeComplete reports whether the merge was performed. + // If unset, the merger must have made no changes to the destination. + MergeComplete MergeOutputFlags = 1 << iota +) + +// CheckInitializedInput is input to the CheckInitialized method. +type CheckInitializedInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message +} + +// CheckInitializedOutput is output from the CheckInitialized method. +type CheckInitializedOutput = struct { + pragma.NoUnkeyedLiterals +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go new file mode 100644 index 00000000..4a1ab7fb --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoimpl contains the default implementation for messages +// generated by protoc-gen-go. +// +// WARNING: This package should only ever be imported by generated messages. +// The compatibility agreement covers nothing except for functionality needed +// to keep existing generated messages operational. Breakages that occur due +// to unauthorized usages of this package are not the author's responsibility. +package protoimpl + +import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filetype" + "google.golang.org/protobuf/internal/impl" +) + +// UnsafeEnabled specifies whether package unsafe can be used. +const UnsafeEnabled = impl.UnsafeEnabled + +type ( + // Types used by generated code in init functions. + DescBuilder = filedesc.Builder + TypeBuilder = filetype.Builder + + // Types used by generated code to implement EnumType, MessageType, and ExtensionType. 
+ EnumInfo = impl.EnumInfo + MessageInfo = impl.MessageInfo + ExtensionInfo = impl.ExtensionInfo + + // Types embedded in generated messages. + MessageState = impl.MessageState + SizeCache = impl.SizeCache + WeakFields = impl.WeakFields + UnknownFields = impl.UnknownFields + ExtensionFields = impl.ExtensionFields + ExtensionFieldV1 = impl.ExtensionField + + Pointer = impl.Pointer +) + +var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go new file mode 100644 index 00000000..ff094e1b --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoimpl + +import ( + "google.golang.org/protobuf/internal/version" +) + +const ( + // MaxVersion is the maximum supported version for generated .pb.go files. + // It is always the current version of the module. + MaxVersion = version.Minor + + // GenVersion is the runtime version required by generated .pb.go files. + // This is incremented when generated code relies on new functionality + // in the runtime. + GenVersion = 20 + + // MinVersion is the minimum supported version for generated .pb.go files. + // This is incremented when the runtime drops support for old code. + MinVersion = 0 +) + +// EnforceVersion is used by code generated by protoc-gen-go +// to statically enforce minimum and maximum versions of this package. +// A compilation failure implies either that: +// * the runtime package is too old and needs to be updated OR +// * the generated code is too old and needs to be regenerated. +// +// The runtime package can be upgraded by running: +// go get google.golang.org/protobuf +// +// The generated code can be regenerated by running: +// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} +// +// Example usage by generated code: +// const ( +// // Verify that this generated code is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +// // Verify that runtime/protoimpl is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) +// ) +// +// The genVersion is the current minor version used to generated the code. +// This compile-time check relies on negative integer overflow of a uint +// being a compilation failure (guaranteed by the Go specification). +type EnforceVersion uint + +// This enforces the following invariant: +// MinVersion ≤ GenVersion ≤ MaxVersion +const ( + _ = EnforceVersion(GenVersion - MinVersion) + _ = EnforceVersion(MaxVersion - GenVersion) +) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 00000000..82423785 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,4040 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. +var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +// Enum value maps for FieldDescriptorProto_Label. 
+var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. 
+var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. 
+var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
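All of these descriptor messages use proto2-style pointer fields with nil-safe getters, so they are normally built with the proto.String / proto.Int32 / Enum() helpers. A minimal hand-built example; the file and message names are made up:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"), // hypothetical file
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
		MessageType: []*descriptorpb.DescriptorProto{{
			Name: proto.String("Greeting"),
			Field: []*descriptorpb.FieldDescriptorProto{{
				Name:   proto.String("text"),
				Number: proto.Int32(1),
				Type:   descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
				Label:  descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
			}},
		}},
	}

	// Getters tolerate nil receivers and unset optionals.
	fmt.Println(fd.GetName(), fd.GetMessageType()[0].GetField()[0].GetNumber())
	var missing *descriptorpb.FileDescriptorProto
	fmt.Println(missing.GetSyntax() == "") // true
}
```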
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ExtensionRangeOptions +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). 
Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. 
+const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Do not use. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
+func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +var extRange_FileOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_FileOptions +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func 
(x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MessageOptions fields. 
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_MessageOptions +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). 
A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. 
+const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. +func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_FieldOptions +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. +func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_OneofOptions +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. 
+func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_EnumOptions +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. +const ( + Default_EnumValueOptions_Deprecated = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_EnumValueOptions +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this service deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ServiceOptions +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. 
+const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_MethodOptions +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. 
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
+} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). 
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. 
+ // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, + 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, + 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, + 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, + 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, + 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, + 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, + 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, + 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, + 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, + 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, + 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType + (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel + (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 16: google.protobuf.FileOptions + (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 20: 
google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation +} +var file_google_protobuf_descriptor_proto_depIdxs = []int32{ + 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 21, // 23: 
google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 43, // [43:43] is the sub-list for method output_type + 43, // [43:43] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + 
file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 6, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go new file mode 100644 index 00000000..82a473e2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -0,0 +1,494 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. 
+// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... // make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// +package anypb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + strings "strings" + sync "sync" +) + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... 
+// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. 
+func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. 
+func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_any_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. +func (*Any) Descriptor() ([]byte, []int) { + return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} +} + +func (x *Any) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Any) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_any_proto protoreflect.FileDescriptor + +var file_google_protobuf_any_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, + 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_any_proto_rawDescOnce sync.Once + file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc +) + +func file_google_protobuf_any_proto_rawDescGZIP() []byte { + file_google_protobuf_any_proto_rawDescOnce.Do(func() { + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + }) + return file_google_protobuf_any_proto_rawDescData +} + +var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_any_proto_goTypes = []interface{}{ + (*Any)(nil), // 0: google.protobuf.Any +} +var file_google_protobuf_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { 
file_google_protobuf_any_proto_init() } +func file_google_protobuf_any_proto_init() { + if File_google_protobuf_any_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_any_proto_goTypes, + DependencyIndexes: file_google_protobuf_any_proto_depIdxs, + MessageInfos: file_google_protobuf_any_proto_msgTypes, + }.Build() + File_google_protobuf_any_proto = out.File + file_google_protobuf_any_proto_rawDesc = nil + file_google_protobuf_any_proto_goTypes = nil + file_google_protobuf_any_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 00000000..f7a11099 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,379 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... 
// make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, + 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, + 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []interface{}{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go new file mode 100644 index 00000000..32a583df --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -0,0 +1,168 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
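// --- Editorial example (not part of the vendored file) ----------------------
// A minimal sketch of the pattern described in the comment above: using
// google.protobuf.Empty as both the request and response of an RPC whose
// payload carries no information. The Pinger interface and method names are
// hypothetical, not part of any real API.
package main

import (
	"context"
	"fmt"

	"google.golang.org/protobuf/types/known/emptypb"
)

// Pinger mirrors a service whose method takes and returns Empty.
type Pinger interface {
	Ping(ctx context.Context, in *emptypb.Empty) (*emptypb.Empty, error)
}

type pinger struct{}

func (pinger) Ping(ctx context.Context, in *emptypb.Empty) (*emptypb.Empty, error) {
	// Nothing to read from the request, nothing to report beyond success.
	return &emptypb.Empty{}, nil
}

func main() {
	var p Pinger = pinger{}
	if _, err := p.Ping(context.Background(), &emptypb.Empty{}); err != nil {
		fmt.Println("ping failed:", err)
	}
}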
+type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: 
+ return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_empty_proto_goTypes, + DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, + MessageInfos: file_google_protobuf_empty_proto_msgTypes, + }.Build() + File_google_protobuf_empty_proto = out.File + file_google_protobuf_empty_proto_rawDesc = nil + file_google_protobuf_empty_proto_goTypes = nil + file_google_protobuf_empty_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 00000000..c25e4bd7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,381 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. 
Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +// +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + time "time" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. 
up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
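// --- Editorial example (not part of the vendored file) ----------------------
// A minimal sketch of the conversions described in the package comment above,
// using only New, Now, AsTime, CheckValid, and the getters defined in this
// file; the variable names are illustrative only.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Date(2021, time.March, 1, 12, 0, 0, 0, time.UTC))
	if err := ts.CheckValid(); err != nil {
		// Reports nil, before-0001, after-9999, or out-of-range nanos.
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime()) // 2021-03-01 12:00:00 +0000 UTC

	now := timestamppb.Now() // shorthand for timestamppb.New(time.Now())
	fmt.Println(now.GetSeconds(), now.GetNanos())
}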
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_rawDesc = nil + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 00000000..43ef7cb1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,636 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. +type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 +) + +// Enum value maps for CodeGeneratorResponse_Feature. +var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. 
+func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. 
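// --- Editorial example (not part of the vendored file) ----------------------
// A minimal sketch of the plugin contract described in the header comment of
// this file: protoc writes a serialized CodeGeneratorRequest to the plugin's
// stdin and expects a serialized CodeGeneratorResponse on stdout. The output
// file names and contents below are purely illustrative.
package main

import (
	"io/ioutil"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}

	var req pluginpb.CodeGeneratorRequest
	if err := proto.Unmarshal(in, &req); err != nil {
		os.Exit(1)
	}

	// Emit one trivial file per file_to_generate entry.
	resp := &pluginpb.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".generated.txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	out, err := proto.Marshal(resp)
	if err != nil {
		os.Exit(1)
	}
	os.Stdout.Write(out)
}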
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." 
components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. 
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 
0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbf, 0x02, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0x5d, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x65, + 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, + 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, + 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, + 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x42, 0x67, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x42, 0x0c, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x5a, 0x39, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, + 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x3b, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x67, 0x6f, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + 
file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 
0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 139497ae..0a6f804b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,22 +1,20 @@ -# cloud.google.com/go v0.45.1 +# cloud.google.com/go v0.61.0 +cloud.google.com/go cloud.google.com/go/compute/metadata cloud.google.com/go/iam cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version +# cloud.google.com/go/storage v1.10.0 cloud.google.com/go/storage # github.com/Cox-Automotive/alks-go v0.0.0-20210414185953-754a7e5f7114 ## explicit github.com/Cox-Automotive/alks-go # github.com/agext/levenshtein v1.2.2 github.com/agext/levenshtein -# github.com/apparentlymart/go-cidr v1.0.1 -github.com/apparentlymart/go-cidr/cidr # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/armon/go-radix v1.0.0 -github.com/armon/go-radix # github.com/aws/aws-sdk-go v1.31.15 ## explicit github.com/aws/aws-sdk-go/aws @@ -64,180 +62,129 @@ github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d github.com/bgentry/go-netrc/netrc -# github.com/bgentry/speakeasy v0.1.0 -github.com/bgentry/speakeasy -# github.com/blang/semver v3.5.1+incompatible -github.com/blang/semver -# github.com/bmatcuk/doublestar v1.1.5 -github.com/bmatcuk/doublestar # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/fatih/color v1.7.0 github.com/fatih/color -# github.com/golang/protobuf v1.3.4 +# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e +github.com/golang/groupcache/lru +# github.com/golang/protobuf v1.4.2 +github.com/golang/protobuf/internal/gengogrpc github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-cmp v0.3.1 +# github.com/google/go-cmp v0.5.2 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/uuid v1.1.1 -github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap +# github.com/hashicorp/go-checkpoint v0.5.0 +github.com/hashicorp/go-checkpoint # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 +# github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 +github.com/hashicorp/go-cty/cty +github.com/hashicorp/go-cty/cty/convert +github.com/hashicorp/go-cty/cty/gocty +github.com/hashicorp/go-cty/cty/json 
+github.com/hashicorp/go-cty/cty/msgpack
+github.com/hashicorp/go-cty/cty/set
+# github.com/hashicorp/go-getter v1.5.0
 github.com/hashicorp/go-getter
 github.com/hashicorp/go-getter/helper/url
-# github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f
+# github.com/hashicorp/go-hclog v0.15.0
 github.com/hashicorp/go-hclog
 # github.com/hashicorp/go-multierror v1.0.0
 github.com/hashicorp/go-multierror
-# github.com/hashicorp/go-plugin v1.3.0
+# github.com/hashicorp/go-plugin v1.4.0
 github.com/hashicorp/go-plugin
 github.com/hashicorp/go-plugin/internal/plugin
-# github.com/hashicorp/go-retryablehttp v0.5.2
-github.com/hashicorp/go-retryablehttp
 # github.com/hashicorp/go-safetemp v1.0.0
 github.com/hashicorp/go-safetemp
 # github.com/hashicorp/go-uuid v1.0.1
 github.com/hashicorp/go-uuid
-# github.com/hashicorp/go-version v1.2.0
+# github.com/hashicorp/go-version v1.2.1
 github.com/hashicorp/go-version
-# github.com/hashicorp/golang-lru v0.5.1
-github.com/hashicorp/golang-lru/simplelru
-# github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f
-github.com/hashicorp/hcl
-github.com/hashicorp/hcl/hcl/ast
-github.com/hashicorp/hcl/hcl/parser
-github.com/hashicorp/hcl/hcl/scanner
-github.com/hashicorp/hcl/hcl/strconv
-github.com/hashicorp/hcl/hcl/token
-github.com/hashicorp/hcl/json/parser
-github.com/hashicorp/hcl/json/scanner
-github.com/hashicorp/hcl/json/token
 # github.com/hashicorp/hcl/v2 v2.3.0
 github.com/hashicorp/hcl/v2
 github.com/hashicorp/hcl/v2/ext/customdecode
-github.com/hashicorp/hcl/v2/ext/dynblock
-github.com/hashicorp/hcl/v2/ext/tryfunc
-github.com/hashicorp/hcl/v2/ext/typeexpr
-github.com/hashicorp/hcl/v2/gohcl
-github.com/hashicorp/hcl/v2/hcldec
-github.com/hashicorp/hcl/v2/hcled
-github.com/hashicorp/hcl/v2/hclparse
 github.com/hashicorp/hcl/v2/hclsyntax
-github.com/hashicorp/hcl/v2/hclwrite
-github.com/hashicorp/hcl/v2/json
-# github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590
-github.com/hashicorp/hil
-github.com/hashicorp/hil/ast
-github.com/hashicorp/hil/parser
-github.com/hashicorp/hil/scanner
-# github.com/hashicorp/terraform v0.12.26
+# github.com/hashicorp/logutils v1.0.0
+github.com/hashicorp/logutils
+# github.com/hashicorp/terraform-exec v0.13.0
+github.com/hashicorp/terraform-exec/internal/version
+github.com/hashicorp/terraform-exec/tfexec
+github.com/hashicorp/terraform-exec/tfinstall
+# github.com/hashicorp/terraform-json v0.8.0
+github.com/hashicorp/terraform-json
+# github.com/hashicorp/terraform-plugin-go v0.2.1
+github.com/hashicorp/terraform-plugin-go/tfprotov5
+github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto
+github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5
+github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto
+github.com/hashicorp/terraform-plugin-go/tfprotov5/server
+github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes
+# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3
 ## explicit
-github.com/hashicorp/terraform/addrs
-github.com/hashicorp/terraform/command/format
-github.com/hashicorp/terraform/config
-github.com/hashicorp/terraform/configs
-github.com/hashicorp/terraform/configs/configload
-github.com/hashicorp/terraform/configs/configschema
-github.com/hashicorp/terraform/configs/hcl2shim
-github.com/hashicorp/terraform/dag
-github.com/hashicorp/terraform/experiments
-github.com/hashicorp/terraform/flatmap
-github.com/hashicorp/terraform/helper/config
-github.com/hashicorp/terraform/helper/didyoumean
-github.com/hashicorp/terraform/helper/hashcode
-github.com/hashicorp/terraform/helper/hilmapstructure
-github.com/hashicorp/terraform/helper/logging
-github.com/hashicorp/terraform/helper/plugin
-github.com/hashicorp/terraform/helper/resource
-github.com/hashicorp/terraform/helper/schema
-github.com/hashicorp/terraform/httpclient
-github.com/hashicorp/terraform/internal/earlyconfig
-github.com/hashicorp/terraform/internal/initwd
-github.com/hashicorp/terraform/internal/modsdir
-github.com/hashicorp/terraform/internal/tfplugin5
-github.com/hashicorp/terraform/lang
-github.com/hashicorp/terraform/lang/blocktoattr
-github.com/hashicorp/terraform/lang/funcs
-github.com/hashicorp/terraform/moduledeps
-github.com/hashicorp/terraform/plans
-github.com/hashicorp/terraform/plans/objchange
-github.com/hashicorp/terraform/plugin
-github.com/hashicorp/terraform/plugin/convert
-github.com/hashicorp/terraform/plugin/discovery
-github.com/hashicorp/terraform/providers
-github.com/hashicorp/terraform/provisioners
-github.com/hashicorp/terraform/registry
-github.com/hashicorp/terraform/registry/regsrc
-github.com/hashicorp/terraform/registry/response
-github.com/hashicorp/terraform/states
-github.com/hashicorp/terraform/states/statefile
-github.com/hashicorp/terraform/terraform
-github.com/hashicorp/terraform/tfdiags
-github.com/hashicorp/terraform/version
-# github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7
-github.com/hashicorp/terraform-config-inspect/tfconfig
-# github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596
-github.com/hashicorp/terraform-svchost
-github.com/hashicorp/terraform-svchost/auth
-github.com/hashicorp/terraform-svchost/disco
-# github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
+github.com/hashicorp/terraform-plugin-sdk/v2/diag
+github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging
+github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource
+github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest
+github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags
+github.com/hashicorp/terraform-plugin-sdk/v2/meta
+github.com/hashicorp/terraform-plugin-sdk/v2/plugin
+github.com/hashicorp/terraform-plugin-sdk/v2/terraform
+# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
 github.com/hashicorp/yamux
 # github.com/jmespath/go-jmespath v0.3.0
 github.com/jmespath/go-jmespath
-# github.com/mattn/go-colorable v0.1.1
+# github.com/jstemmer/go-junit-report v0.9.1
+github.com/jstemmer/go-junit-report
+github.com/jstemmer/go-junit-report/formatter
+github.com/jstemmer/go-junit-report/parser
+# github.com/mattn/go-colorable v0.1.4
 github.com/mattn/go-colorable
-# github.com/mattn/go-isatty v0.0.5
+# github.com/mattn/go-isatty v0.0.10
 github.com/mattn/go-isatty
-# github.com/mitchellh/cli v1.0.0
-github.com/mitchellh/cli
-# github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
-github.com/mitchellh/colorstring
 # github.com/mitchellh/copystructure v1.0.0
 github.com/mitchellh/copystructure
 # github.com/mitchellh/go-homedir v1.1.0
 ## explicit
 github.com/mitchellh/go-homedir
-# github.com/mitchellh/go-testing-interface v1.0.0
+# github.com/mitchellh/go-testing-interface v1.0.4
 github.com/mitchellh/go-testing-interface
 # github.com/mitchellh/go-wordwrap v1.0.0
 github.com/mitchellh/go-wordwrap
-# github.com/mitchellh/hashstructure v1.0.0
-github.com/mitchellh/hashstructure
 # github.com/mitchellh/mapstructure v1.1.2
 github.com/mitchellh/mapstructure
-# github.com/mitchellh/reflectwalk v1.0.0
+# github.com/mitchellh/reflectwalk v1.0.1
 github.com/mitchellh/reflectwalk
 # github.com/oklog/run v1.0.0
 github.com/oklog/run
-# github.com/posener/complete v1.2.1
-github.com/posener/complete
-github.com/posener/complete/cmd
-github.com/posener/complete/cmd/install
-github.com/posener/complete/match
-# github.com/spf13/afero v1.2.1
-github.com/spf13/afero
-github.com/spf13/afero/mem
-# github.com/ulikunitz/xz v0.5.5
+# github.com/ulikunitz/xz v0.5.8
 github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
 github.com/ulikunitz/xz/lzma
-# github.com/vmihailenco/msgpack v4.0.1+incompatible
+# github.com/vmihailenco/msgpack v4.0.4+incompatible
 github.com/vmihailenco/msgpack
 github.com/vmihailenco/msgpack/codes
 # github.com/zclconf/go-cty v1.2.1
@@ -247,11 +194,8 @@ github.com/zclconf/go-cty/cty/function
 github.com/zclconf/go-cty/cty/function/stdlib
 github.com/zclconf/go-cty/cty/gocty
 github.com/zclconf/go-cty/cty/json
-github.com/zclconf/go-cty/cty/msgpack
 github.com/zclconf/go-cty/cty/set
-# github.com/zclconf/go-cty-yaml v1.0.1
-github.com/zclconf/go-cty-yaml
-# go.opencensus.io v0.22.0
+# go.opencensus.io v0.22.4
 go.opencensus.io
 go.opencensus.io/internal
 go.opencensus.io/internal/tagencoding
@@ -268,9 +212,7 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
-golang.org/x/crypto/bcrypt
-golang.org/x/crypto/blowfish
+# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
 golang.org/x/crypto/cast5
 golang.org/x/crypto/openpgp
 golang.org/x/crypto/openpgp/armor
@@ -278,7 +220,13 @@ golang.org/x/crypto/openpgp/elgamal
 golang.org/x/crypto/openpgp/errors
 golang.org/x/crypto/openpgp/packet
 golang.org/x/crypto/openpgp/s2k
-# golang.org/x/net v0.0.0-20200202094626-16171245cfb2
+# golang.org/x/lint v0.0.0-20200302205851-738671d3881b
+golang.org/x/lint
+golang.org/x/lint/golint
+# golang.org/x/mod v0.3.0
+golang.org/x/mod/module
+golang.org/x/mod/semver
+# golang.org/x/net v0.0.0-20200707034311-ab3426394381
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
 golang.org/x/net/http/httpguts
@@ -287,31 +235,50 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 golang.org/x/oauth2
 golang.org/x/oauth2/google
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
+# golang.org/x/sys v0.0.0-20200523222454-059865788121
+golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
-# golang.org/x/text v0.3.2
+# golang.org/x/text v0.3.3
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# google.golang.org/api v0.9.0
-google.golang.org/api/gensupport
+# golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed
+golang.org/x/tools/cmd/goimports
+golang.org/x/tools/go/ast/astutil
+golang.org/x/tools/go/gcexportdata
+golang.org/x/tools/go/internal/gcimporter
+golang.org/x/tools/internal/event
+golang.org/x/tools/internal/event/core
+golang.org/x/tools/internal/event/keys
+golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/fastwalk
+golang.org/x/tools/internal/gocommand
+golang.org/x/tools/internal/gopathwalk
+golang.org/x/tools/internal/imports
+# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
+golang.org/x/xerrors
+golang.org/x/xerrors/internal
+# google.golang.org/api v0.29.0
 google.golang.org/api/googleapi
-google.golang.org/api/googleapi/internal/uritemplates
 google.golang.org/api/googleapi/transport
 google.golang.org/api/internal
+google.golang.org/api/internal/gensupport
+google.golang.org/api/internal/third_party/uritemplates
 google.golang.org/api/iterator
 google.golang.org/api/option
+google.golang.org/api/option/internaloption
 google.golang.org/api/storage/v1
+google.golang.org/api/transport/cert
 google.golang.org/api/transport/http
 google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/appengine v1.6.1
+# google.golang.org/appengine v1.6.6
 google.golang.org/appengine
 google.golang.org/appengine/datastore
 google.golang.org/appengine/datastore/internal/cloudkey
@@ -325,24 +292,24 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
+# google.golang.org/genproto v0.0.0-20200711021454-869866162049
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/type/expr
-# google.golang.org/grpc v1.27.1
+# google.golang.org/grpc v1.32.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
-google.golang.org/grpc/credentials/internal
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
@@ -354,16 +321,20 @@ google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
 google.golang.org/grpc/internal/buffer
 google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
 google.golang.org/grpc/internal/envconfig
+google.golang.org/grpc/internal/grpclog
 google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
 google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
-google.golang.org/grpc/naming
 google.golang.org/grpc/peer
 google.golang.org/grpc/reflection
 google.golang.org/grpc/reflection/grpc_reflection_v1alpha
@@ -372,4 +343,39 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-google.golang.org/grpc/test/bufconn
+# google.golang.org/protobuf v1.25.0
+google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo
+google.golang.org/protobuf/compiler/protogen
+google.golang.org/protobuf/encoding/prototext
+google.golang.org/protobuf/encoding/protowire
+google.golang.org/protobuf/internal/descfmt
+google.golang.org/protobuf/internal/descopts
+google.golang.org/protobuf/internal/detrand
+google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/messageset
+google.golang.org/protobuf/internal/encoding/tag
+google.golang.org/protobuf/internal/encoding/text
+google.golang.org/protobuf/internal/errors
+google.golang.org/protobuf/internal/fieldsort
+google.golang.org/protobuf/internal/filedesc
+google.golang.org/protobuf/internal/filetype
+google.golang.org/protobuf/internal/flags
+google.golang.org/protobuf/internal/genid
+google.golang.org/protobuf/internal/impl
+google.golang.org/protobuf/internal/mapsort
+google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/set
+google.golang.org/protobuf/internal/strs
+google.golang.org/protobuf/internal/version
+google.golang.org/protobuf/proto
+google.golang.org/protobuf/reflect/protodesc
+google.golang.org/protobuf/reflect/protoreflect
+google.golang.org/protobuf/reflect/protoregistry
+google.golang.org/protobuf/runtime/protoiface
+google.golang.org/protobuf/runtime/protoimpl
+google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/known/anypb
+google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/emptypb
+google.golang.org/protobuf/types/known/timestamppb
+google.golang.org/protobuf/types/pluginpb